diff --git a/ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..bee8643fcd810ed3eb6df3cc5e9ea0cebad20fd2 --- /dev/null +++ b/ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:356151db3e8faf82c073da70a6e056425fcc45a2b0970b707a513579f35f3333 +size 50332843 diff --git a/ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..f80b40277368df38c5aea98d72190ce8e9579cf0 --- /dev/null +++ b/ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7d7627bc9a3c917891aa8408fbeeae1dbc23b07b3b9a003d958165eb8bd1246 +size 50332749 diff --git a/ckpts/universal/global_step20/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..ff22c62ad691851b12398dbc9fde26c2685c28f5 --- /dev/null +++ b/ckpts/universal/global_step20/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:523460a090ec65446cc6a8d4693ef0107650c6fd11c6d977e02f123e9863ce23 +size 33555627 diff --git a/ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..57860ff67fccc8bce694e5254efa68e5913febe5 --- /dev/null +++ b/ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e54fbc32a8b1254807e878ba1cd35e94b720b342cbc46a4f1c16bdfd873bd7ce +size 33555612 diff --git a/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..9bb4957906e7e1dcdce26a90f3b251443c1ff0d5 --- /dev/null +++ b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e82149015d85bc08b00227a304e699480097baacbf1704add001ecfa7245340 +size 33555612 diff --git a/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..996c2651a3619480d97b02d400a6bb99cd9afad3 --- /dev/null +++ b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b81a2876bfdd26b7928ac452190c8e85c73b8a8aa31da6508ef0ff87f6f6469 +size 33555627 diff --git a/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..1e008e5c5f158cb17581199307f1e0d534a6e1cd --- /dev/null +++ 
b/ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67d90a6232ca06174ec19b38aa8d4bffa27dfadeb7d3c357fc014fd442ceafd6 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__init__.py b/venv/lib/python3.10/site-packages/torch/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b042126d99e217b789853d4a52b871918f9162f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/__init__.py @@ -0,0 +1,1412 @@ +r""" +This package adds support for CUDA tensor types. + +It implements the same function as CPU tensors, but they utilize +GPUs for computation. + +It is lazily initialized, so you can always import it, and use +:func:`is_available()` to determine if your system supports CUDA. + +:ref:`cuda-semantics` has more details about working with CUDA. +""" + + +import contextlib +import importlib +import os +import sys +import threading +import traceback +import warnings +from functools import lru_cache +from typing import Any, Callable, cast, List, Optional, Tuple, Union + +import torch +import torch._C +from torch.types import Device +from .. import device as _device +from .._utils import _dummy_type, _LazySeedTracker, classproperty +from ._utils import _get_device_index +from .graphs import ( + CUDAGraph, + graph, + graph_pool_handle, + is_current_stream_capturing, + make_graphed_callables, +) +from .streams import Event, ExternalStream, Stream + +try: + from torch._C import _cudart # type: ignore[attr-defined] +except ImportError: + _cudart = None + +_initialized = False +_tls = threading.local() +_initialization_lock = threading.Lock() +_queued_calls: List[ + Tuple[Callable[[], None], List[str]] +] = [] # don't invoke these until initialization occurs +_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False) +_device_t = Union[_device, str, int, None] + +_HAS_PYNVML = False +_PYNVML_ERR = None +try: + import pynvml # type: ignore[import] + + _HAS_PYNVML = True +except ImportError as err: + _PYNVML_ERR = err # sometimes a lib is installed but the import fails for some other reason, so we log the error for later + +_lazy_seed_tracker = _LazySeedTracker() + +# Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA +if hasattr(torch._C, "_CudaDeviceProperties"): + _CudaDeviceProperties = torch._C._CudaDeviceProperties +else: + _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties") # type: ignore[assignment, misc] + +if hasattr(torch._C, "_cuda_exchangeDevice"): + _exchange_device = torch._C._cuda_exchangeDevice +else: + + def _exchange_device(device: int) -> int: + if device < 0: + return -1 + raise RuntimeError("PyTorch was compiled without CUDA support") + + +if hasattr(torch._C, "_cuda_maybeExchangeDevice"): + _maybe_exchange_device = torch._C._cuda_maybeExchangeDevice +else: + + def _maybe_exchange_device(device: int) -> int: + if device < 0: + return -1 + raise RuntimeError("PyTorch was compiled without CUDA support") + + +has_half: bool = True +has_magma: bool = torch._C._has_magma + +default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment] + + +def _is_compiled() -> bool: + r"""Return true if compile with CUDA support.""" + return hasattr(torch._C, "_cuda_getDeviceCount") + + +def _nvml_based_avail() -> bool: + return os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1" + + +def is_available() -> bool: + r"""Return a bool indicating if CUDA is currently available.""" + if 
not _is_compiled(): + return False + if _nvml_based_avail(): + # The user has set an env variable to request this availability check that attempts to avoid fork poisoning by + # using NVML at the cost of a weaker CUDA availability assessment. Note that if NVML discovery/initialization + # fails, this assessment falls back to the default CUDA Runtime API assessment (`cudaGetDeviceCount`) + return device_count() > 0 + else: + # The default availability inspection never throws and returns 0 if the driver is missing or can't + # be initialized. This uses the CUDA Runtime API `cudaGetDeviceCount` which in turn initializes the CUDA Driver + # API via `cuInit` + return torch._C._cuda_getDeviceCount() > 0 + + +def is_bf16_supported(): + r"""Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16.""" + # Check for ROCm, if true return true, no ROCM_VERSION check required, + # since it is supported on AMD GPU archs. + if torch.version.hip: + return True + + device = torch.cuda.current_device() + + # Check for CUDA version and device compute capability. + # This is a fast way to check for it. + cuda_version = torch.version.cuda + if ( + cuda_version is not None + and int(cuda_version.split(".")[0]) >= 11 + and torch.cuda.get_device_properties(device).major >= 8 + ): + return True + + # Finally try to create a bfloat16 device. + return _check_bf16_tensor_supported(device) + + +@lru_cache(maxsize=16) +def _check_bf16_tensor_supported(device: _device_t): + try: + torch.tensor([1.0], dtype=torch.bfloat16, device=device) + return True + except Exception: + return False + + +def _sleep(cycles): + torch._C._cuda_sleep(cycles) + + +def _check_capability(): + incorrect_binary_warn = """ + Found GPU%d %s which requires CUDA_VERSION >= %d to + work properly, but your PyTorch was compiled + with CUDA_VERSION %d. Please install the correct PyTorch binary + using instructions from https://pytorch.org + """ + + old_gpu_warn = """ + Found GPU%d %s which is of cuda capability %d.%d. + PyTorch no longer supports this GPU because it is too old. + The minimum cuda capability supported by this library is %d.%d. + """ + + if torch.version.cuda is not None: # on ROCm we don't want this check + CUDA_VERSION = torch._C._cuda_getCompiledVersion() + for d in range(device_count()): + capability = get_device_capability(d) + major = capability[0] + minor = capability[1] + name = get_device_name(d) + current_arch = major * 10 + minor + min_arch = min( + (int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()), + default=35, + ) + if current_arch < min_arch: + warnings.warn( + old_gpu_warn + % (d, name, major, minor, min_arch // 10, min_arch % 10) + ) + + +def _check_cubins(): + incompatible_device_warn = """ +{} with CUDA capability sm_{} is not compatible with the current PyTorch installation. +The current PyTorch install supports CUDA capabilities {}. 
+If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/ +""" + if torch.version.cuda is None: # on ROCm we don't want this check + return + arch_list = get_arch_list() + if len(arch_list) == 0: + return + supported_sm = [int(arch.split("_")[1]) for arch in arch_list if "sm_" in arch] + for idx in range(device_count()): + cap_major, cap_minor = get_device_capability(idx) + # NVIDIA GPU compute architectures are backward compatible within major version + supported = any(sm // 10 == cap_major for sm in supported_sm) + if not supported: + device_name = get_device_name(idx) + capability = cap_major * 10 + cap_minor + warnings.warn( + incompatible_device_warn.format( + device_name, capability, " ".join(arch_list), device_name + ) + ) + + +def is_initialized(): + r"""Return whether PyTorch's CUDA state has been initialized.""" + return _initialized and not _is_in_bad_fork() + + +def _lazy_call(callable, **kwargs): + if is_initialized(): + callable() + else: + # TODO(torch_deploy): this accesses linecache, which attempts to read the + # file system to get traceback info. Patch linecache or do something + # else here if this ends up being important. + global _lazy_seed_tracker + if kwargs.get("seed_all", False): + _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack()) + elif kwargs.get("seed", False): + _lazy_seed_tracker.queue_seed(callable, traceback.format_stack()) + else: + # Don't store the actual traceback to avoid memory cycle + _queued_calls.append((callable, traceback.format_stack())) + + +_lazy_call(_check_capability) +_lazy_call(_check_cubins) + + +class DeferredCudaCallError(Exception): + pass + + +OutOfMemoryError = torch._C._OutOfMemoryError + + +def init(): + r"""Initialize PyTorch's CUDA state. + + You may need to call this explicitly if you are interacting with + PyTorch via its C API, as Python bindings for CUDA functionality + will not be available until this initialization takes place. + Ordinary users should not need this, as all of PyTorch's CUDA methods + automatically initialize CUDA state on-demand. + + Does nothing if the CUDA state is already initialized. + """ + _lazy_init() + + +def _lazy_init(): + global _initialized, _queued_calls + if is_initialized() or hasattr(_tls, "is_initializing"): + return + with _initialization_lock: + # We be double-checked locking, boys! This is OK because + # the above test was GIL protected anyway. The inner test + # is for when a thread blocked on some other thread which was + # doing the initialization; when they get the lock, they will + # find there is nothing left to do. + if is_initialized(): + return + # It is important to prevent other threads from entering _lazy_init + # immediately, while we are still guaranteed to have the GIL, because some + # of the C calls we make below will release the GIL + if _is_in_bad_fork(): + raise RuntimeError( + "Cannot re-initialize CUDA in forked subprocess. To use CUDA with " + "multiprocessing, you must use the 'spawn' start method" + ) + if not hasattr(torch._C, "_cuda_getDeviceCount"): + raise AssertionError("Torch not compiled with CUDA enabled") + if _cudart is None: + raise AssertionError( + "libcudart functions unavailable. It looks like you have a broken build?" 
+ ) + # This function throws if there's a driver initialization error, no GPUs + # are found or any other error occurs + if "CUDA_MODULE_LOADING" not in os.environ: + os.environ["CUDA_MODULE_LOADING"] = "LAZY" + torch._C._cuda_init() + # Some of the queued calls may reentrantly call _lazy_init(); + # we need to just return without initializing in that case. + # However, we must not let any *other* threads in! + _tls.is_initializing = True + + for calls in _lazy_seed_tracker.get_calls(): + if calls: + _queued_calls.append(calls) + + try: + for queued_call, orig_traceback in _queued_calls: + try: + queued_call() + except Exception as e: + msg = ( + f"CUDA call failed lazily at initialization with error: {str(e)}\n\n" + f"CUDA call was originally invoked at:\n\n{''.join(orig_traceback)}" + ) + raise DeferredCudaCallError(msg) from e + finally: + delattr(_tls, "is_initializing") + _initialized = True + + +def cudart(): + _lazy_init() + return _cudart + + +class cudaStatus: + SUCCESS: int = 0 + ERROR_NOT_READY: int = 34 + + +class CudaError(RuntimeError): + def __init__(self, code: int) -> None: + msg = _cudart.cudaGetErrorString(_cudart.cudaError(code)) + super().__init__(f"{msg} ({code})") + + +def check_error(res: int) -> None: + if res != _cudart.cudaError.success: + raise CudaError(res) + + +class _DeviceGuard: + def __init__(self, index: int): + self.idx = index + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.cuda._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.cuda._maybe_exchange_device(self.prev_idx) + return False + + +class device: + r"""Context-manager that changes the selected device. + + Args: + device (torch.device or int): device index to select. It's a no-op if + this argument is a negative integer or ``None``. + """ + + def __init__(self, device: Any): + self.idx = _get_device_index(device, optional=True) + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.cuda._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.cuda._maybe_exchange_device(self.prev_idx) + return False + + +class device_of(device): + r"""Context-manager that changes the current device to that of given object. + + You can use both tensors and storages as arguments. If a given object is + not allocated on a GPU, this is a no-op. + + Args: + obj (Tensor or Storage): object allocated on the selected device. + """ + + def __init__(self, obj): + idx = obj.get_device() if obj.is_cuda else -1 + super().__init__(idx) + + +def set_device(device: _device_t) -> None: + r"""Set the current device. + + Usage of this function is discouraged in favor of :any:`device`. In most + cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable. + + Args: + device (torch.device or int): selected device. This function is a no-op + if this argument is negative. + """ + device = _get_device_index(device) + if device >= 0: + torch._C._cuda_setDevice(device) + + +def get_device_name(device: Optional[_device_t] = None) -> str: + r"""Get the name of a device. + + Args: + device (torch.device or int, optional): device for which to return the + name. This function is a no-op if this argument is a negative + integer. It uses the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). 
+ + Returns: + str: the name of the device + """ + return get_device_properties(device).name + + +def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]: + r"""Get the cuda capability of a device. + + Args: + device (torch.device or int, optional): device for which to return the + device capability. This function is a no-op if this argument is + a negative integer. It uses the current device, given by + :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + + Returns: + tuple(int, int): the major and minor cuda capability of the device + """ + prop = get_device_properties(device) + return prop.major, prop.minor + + +def get_device_properties(device: _device_t) -> _CudaDeviceProperties: + r"""Get the properties of a device. + + Args: + device (torch.device or int or str): device for which to return the + properties of the device. + + Returns: + _CudaDeviceProperties: the properties of the device + """ + _lazy_init() # will define _get_device_properties + device = _get_device_index(device, optional=True) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device id") + return _get_device_properties(device) # type: ignore[name-defined] + + +def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool: + r"""Check if peer access between two devices is possible.""" + _lazy_init() + device = _get_device_index(device, optional=True) + peer_device = _get_device_index(peer_device) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device id") + if peer_device < 0 or peer_device >= device_count(): + raise AssertionError("Invalid peer device id") + return torch._C._cuda_canDeviceAccessPeer(device, peer_device) + + +class StreamContext: + r"""Context-manager that selects a given stream. + + All CUDA kernels queued within its context will be enqueued on a selected + stream. + + Args: + Stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + .. note:: Streams are per-device. 
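    A minimal illustrative sketch of typical usage (assumes a CUDA device is
    available; ``s``, ``x`` and ``y`` are placeholder names)::

        s = torch.cuda.Stream()                      # side stream, distinct from the default stream
        x = torch.full((100,), 1.0, device="cuda")
        with torch.cuda.stream(s):                   # StreamContext makes ``s`` the current stream
            y = x * 2                                # this kernel is enqueued on ``s``
        torch.cuda.current_stream().wait_stream(s)   # sync back before consuming ``y``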
+ """ + cur_stream: Optional["torch.cuda.Stream"] + + def __init__(self, stream: Optional["torch.cuda.Stream"]): + self.stream = stream + self.idx = _get_device_index(None, True) + if not torch.jit.is_scripting(): + if self.idx is None: + self.idx = -1 + + self.src_prev_stream = ( + None if not torch.jit.is_scripting() else torch.cuda.default_stream(None) + ) + self.dst_prev_stream = ( + None if not torch.jit.is_scripting() else torch.cuda.default_stream(None) + ) + + def __enter__(self): + # Local cur_stream variable for type refinement + cur_stream = self.stream + # Return if stream is None or CUDA device not available + if cur_stream is None or self.idx == -1: + return + self.src_prev_stream = torch.cuda.current_stream(None) + + # If the stream is not on the current device, then + # set the current stream on the device + if self.src_prev_stream.device != cur_stream.device: + with device(cur_stream.device): + self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device) + torch.cuda.set_stream(cur_stream) + + def __exit__(self, type: Any, value: Any, traceback: Any): + # Local cur_stream variable for type refinement + cur_stream = self.stream + # If stream is None or no CUDA device available, return + if cur_stream is None or self.idx == -1: + return + + # Reset the stream on the original device + # and destination device + if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr] + torch.cuda.set_stream(self.dst_prev_stream) # type: ignore[arg-type] + torch.cuda.set_stream(self.src_prev_stream) # type: ignore[arg-type] + + +def stream(stream: Optional["torch.cuda.Stream"]) -> StreamContext: + r"""Wrap around the Context-manager StreamContext that selects a given stream. + + Arguments: + stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + ..Note:: In eager mode stream is of type Stream class while in JIT it is + an object of the custom class ``torch.classes.cuda.Stream``. + """ + return StreamContext(stream) + + +def _set_stream_by_id(stream_id, device_index, device_type): + r"""set stream specified by the stream id, device index and + device type + + Args: stream_id (int): stream id in stream pool + device_index (int): device index in topo + device_type (int): enum device type + """ + torch._C._cuda_setStream( + stream_id=stream_id, + device_index=device_index, + device_type=device_type, + ) + + +def set_stream(stream: Stream): + r"""Set the current stream.This is a wrapper API to set the stream. + Usage of this function is discouraged in favor of the ``stream`` + context manager. + + Args: + stream (Stream): selected stream. This function is a no-op + if this argument is ``None``. 
+ """ + if stream is None: + return + _set_stream_by_id( + stream_id=stream.stream_id, + device_index=stream.device_index, + device_type=stream.device_type, + ) + + +def _parse_visible_devices() -> Union[List[int], List[str]]: + r"""Parse CUDA_VISIBLE_DEVICES environment variable.""" + var = os.getenv("CUDA_VISIBLE_DEVICES") + if var is None: + return list(range(64)) + + def _strtoul(s: str) -> int: + """Return -1 or positive integer sequence string starts with.""" + if not s: + return -1 + for idx, c in enumerate(s): + if not (c.isdigit() or (idx == 0 and c in "+-")): + break + if idx + 1 == len(s): + idx += 1 + return int(s[:idx]) if idx > 0 else -1 + + def parse_list_with_prefix(lst: str, prefix: str) -> List[str]: + rcs: List[str] = [] + for elem in lst.split(","): + # Repeated id results in empty set + if elem in rcs: + return cast(List[str], []) + # Anything other but prefix is ignored + if not elem.startswith(prefix): + break + rcs.append(elem) + return rcs + + if var.startswith("GPU-"): + return parse_list_with_prefix(var, "GPU-") + if var.startswith("MIG-"): + return parse_list_with_prefix(var, "MIG-") + # CUDA_VISIBLE_DEVICES uses something like strtoul + # which makes `1gpu2,2ampere` is equivalent to `1,2` + rc: List[int] = [] + for elem in var.split(","): + x = _strtoul(elem.strip()) + # Repeated ordinal results in empty set + if x in rc: + return cast(List[int], []) + # Negative value aborts the sequence + if x < 0: + break + rc.append(x) + return rc + + +def _raw_device_count_nvml() -> int: + r"""Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed.""" + from ctypes import byref, c_int, CDLL + + nvml_h = CDLL("libnvidia-ml.so.1") + rc = nvml_h.nvmlInit() + if rc != 0: + warnings.warn("Can't initialize NVML") + return -1 + dev_count = c_int(-1) + rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count)) + if rc != 0: + warnings.warn("Can't get nvml device count") + return -1 + del nvml_h + return dev_count.value + + +def _raw_device_uuid_nvml() -> Optional[List[str]]: + r"""Return list of device UUID as reported by NVML or None if NVM discovery/initialization failed.""" + from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer + + nvml_h = CDLL("libnvidia-ml.so.1") + rc = nvml_h.nvmlInit() + if rc != 0: + warnings.warn("Can't initialize NVML") + return None + dev_count = c_int(-1) + rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count)) + if rc != 0: + warnings.warn("Can't get nvml device count") + return None + uuids: List[str] = [] + for idx in range(dev_count.value): + dev_id = c_void_p() + rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id)) + if rc != 0: + warnings.warn("Can't get device handle") + return None + buf_len = 96 + buf = create_string_buffer(buf_len) + rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len) + if rc != 0: + warnings.warn("Can't get device UUID") + return None + uuids.append(buf.raw.decode("ascii").strip("\0")) + del nvml_h + return uuids + + +def _transform_uuid_to_ordinals(candidates: List[str], uuids: List[str]) -> List[int]: + r"""Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials IDs.""" + + def uuid_to_orinal(candidate: str, uuids: List[str]) -> int: + best_match = -1 + for idx, uuid in enumerate(uuids): + if not uuid.startswith(candidate): + continue + # Ambiguous candidate + if best_match != -1: + return -1 + best_match = idx + return best_match + + rc: List[int] = [] + for candidate in candidates: + idx = 
uuid_to_orinal(candidate, uuids) + # First invalid ordinal stops parsing + if idx < 0: + break + # Duplicates result in empty set + if idx in rc: + return cast(List[int], []) + rc.append(idx) + return rc + + +def _device_count_nvml() -> int: + r"""Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account. + + Negative value is returned if NVML discovery or initialization has failed. + """ + visible_devices = _parse_visible_devices() + if not visible_devices: + return 0 + try: + if type(visible_devices[0]) is str: + # Skip MIG parsing + if visible_devices[0].startswith("MIG-"): + return -1 + uuids = _raw_device_uuid_nvml() + if uuids is None: + return -1 + visible_devices = _transform_uuid_to_ordinals( + cast(List[str], visible_devices), uuids + ) + else: + raw_cnt = _raw_device_count_nvml() + if raw_cnt <= 0: + return raw_cnt + # Trim the list up to a maximum available device + for idx, val in enumerate(visible_devices): + if cast(int, val) >= raw_cnt: + return idx + except OSError: + return -1 + except AttributeError: + return -1 + return len(visible_devices) + + +def _get_nvml_device_index(device: Optional[Union[int, Device]]) -> int: + r"""Return the NVML index of the device, taking CUDA_VISIBLE_DEVICES into account.""" + idx = _get_device_index(device, optional=True) + visible_devices = _parse_visible_devices() + if type(visible_devices[0]) is str: + uuids = _raw_device_uuid_nvml() + if uuids is None: + raise RuntimeError("Can't get device UUIDs") + visible_devices = _transform_uuid_to_ordinals( + cast(List[str], visible_devices), uuids + ) + visible_devices = cast(List[int], visible_devices) + if idx < 0 or idx >= len(visible_devices): + raise RuntimeError( + f"device {idx} is not visible (CUDA_VISIBLE_DEVICES={visible_devices})" + ) + return visible_devices[idx] + + +@lru_cache(maxsize=1) +def device_count() -> int: + r"""Return the number of GPUs available.""" + if not _is_compiled(): + return 0 + # bypass _device_count_nvml() if rocm (not supported) + nvml_count = -1 if torch.version.hip else _device_count_nvml() + return torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count + + +def get_arch_list() -> List[str]: + r"""Return list CUDA architectures this library was compiled for.""" + if not is_available(): + return [] + arch_flags = torch._C._cuda_getArchFlags() + if arch_flags is None: + return [] + return arch_flags.split() + + +def get_gencode_flags() -> str: + r"""Return NVCC gencode flags this library was compiled with.""" + arch_list = get_arch_list() + if len(arch_list) == 0: + return "" + arch_list_ = [arch.split("_") for arch in arch_list] + return " ".join( + [ + f"-gencode compute=compute_{arch},code={kind}_{arch}" + for (kind, arch) in arch_list_ + ] + ) + + +def current_device() -> int: + r"""Return the index of a currently selected device.""" + _lazy_init() + return torch._C._cuda_getDevice() + + +def synchronize(device: _device_t = None) -> None: + r"""Wait for all kernels in all streams on a CUDA device to complete. + + Args: + device (torch.device or int, optional): device for which to synchronize. + It uses the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + """ + _lazy_init() + with torch.cuda.device(device): + return torch._C._cuda_synchronize() + + +def ipc_collect(): + r"""Force collects GPU memory after it has been released by CUDA IPC. + + .. note:: + Checks if any sent CUDA tensors could be cleaned from the memory. 
Force + closes shared memory file used for reference counting if there is no + active counters. Useful when the producer process stopped actively sending + tensors and want to release unused memory. + """ + _lazy_init() + return torch._C._cuda_ipc_collect() + + +def current_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the currently selected :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the currently selected :class:`Stream` for the current device, given + by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._cuda_getCurrentStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def default_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the default :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the default :class:`Stream` for the current device, given by + :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._cuda_getDefaultStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def current_blas_handle(): + r"""Return cublasHandle_t pointer to current cuBLAS handle""" + _lazy_init() + return torch._C._cuda_getCurrentBlasHandle() + + +def set_sync_debug_mode(debug_mode: Union[int, str]) -> None: + r"""Set the debug mode for cuda synchronizing operations. + + Args: + debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations, + if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations. + + Warning: + This is an experimental feature, and not all synchronizing operations will trigger warning or error. In + particular, operations in torch.distributed and torch.sparse namespaces are not covered yet. + """ + _lazy_init() + if isinstance(debug_mode, str): + if debug_mode == "default": + debug_mode = 0 + elif debug_mode == "warn": + debug_mode = 1 + elif debug_mode == "error": + debug_mode = 2 + else: + raise RuntimeError( + "invalid value of debug_mode, expected one of `default`, `warn`, `error`" + ) + + torch._C._cuda_set_sync_debug_mode(debug_mode) + + +def get_sync_debug_mode() -> int: + r"""Return current value of debug mode for cuda synchronizing operations.""" + _lazy_init() + return torch._C._cuda_get_sync_debug_mode() + + +def _get_pynvml_handler(device: Optional[Union[Device, int]] = None): + if not _HAS_PYNVML: + raise ModuleNotFoundError( + "pynvml does not seem to be installed or it can't be imported." + ) from _PYNVML_ERR + from pynvml import NVMLError_DriverNotLoaded + + try: + pynvml.nvmlInit() + except NVMLError_DriverNotLoaded as e: + raise RuntimeError("cuda driver can't be loaded, is cuda enabled?") from e + + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return handle + + +def memory_usage(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the percent of time over the past sample period during which global (device) + memory was being read or written as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. 
Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler() + + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return pynvml.nvmlDeviceGetUtilizationRates(handle).memory + + +def utilization(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the percent of time over the past sample period during which one or + more kernels was executing on the GPU as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu + + +def temperature(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the average temperature of the GPU sensor in Degrees C (Centigrades). + + The average temperature is computed based on past sample period as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + # 0 refers to the temperature sensor for the GPU die. + return pynvml.nvmlDeviceGetTemperature(handle, 0) + + +def power_draw(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the average power draw of the GPU sensor in mW (MilliWatts) + over the past sample period as given by `nvidia-smi` for Fermi or newer fully supported devices. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + return pynvml.nvmlDeviceGetPowerUsage(handle) + + +def clock_rate(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the clock speed of the GPU SM in Hz Hertz over the past sample period as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + return pynvml.nvmlDeviceGetClockInfo(handle, 1) + + +def _get_device(device: Union[int, str, torch.device]) -> torch.device: + r"""Return the torch.device type object from the passed in device. + + Args: + device (torch.device or int): selected device. 
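    Illustrative sketch of the normalization this helper performs (results shown
    as comments, assuming CUDA devices exist)::

        _get_device("cuda:1")              # torch.device("cuda", 1)
        _get_device(0)                     # torch.device("cuda", 0)
        _get_device(torch.device("cuda"))  # returned unchanged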
+ """ + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + return device + + +def _get_generator(device: torch.device) -> torch._C.Generator: + r"""Return the CUDA Generator object for the given device. + + Args: + device (torch.device): selected device. + """ + idx = device.index + if idx is None: + idx = current_device() + return torch.cuda.default_generators[idx] + + +def _set_rng_state_offset( + offset: int, device: Union[int, str, torch.device] = "cuda" +) -> None: + r"""Set the random number generator state offset of the specified GPU. + + Args: + offset (int): The desired offset + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + """ + final_device = _get_device(device) + + def cb(): + default_generator = _get_generator(final_device) + default_generator.set_offset(offset) + + _lazy_call(cb) + + +def _get_rng_state_offset(device: Union[int, str, torch.device] = "cuda") -> int: + r"""Return the random number generator state offset of the specified GPU. + + Args: + device (torch.device or int, optional): The device to return the RNG state offset of. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + + .. warning:: + This function eagerly initializes CUDA. + """ + _lazy_init() + final_device = _get_device(device) + default_generator = _get_generator(final_device) + return default_generator.get_offset() + + +from .memory import * # noqa: F403 + + +from .random import * # noqa: F403 + +################################################################################ +# Define Storage and Tensor classes +################################################################################ + + +@staticmethod # type: ignore[misc] +def _lazy_new(cls, *args, **kwargs): + _lazy_init() + # We may need to call lazy init again if we are a forked child + # del _CudaBase.__new__ + return super(_CudaBase, cls).__new__(cls, *args, **kwargs) + + +class _CudaBase: + is_cuda = True + is_sparse = False + + def type(self, *args, **kwargs): + # We could use a Protocol here to tell mypy that self has `get_device` method + # but it is only available in the typing module on Python >= 3.8 + # or on typing_extensions module on Python >= 3.6 + with device(self.get_device()): # type: ignore[attr-defined] + return super().type(*args, **kwargs) # type: ignore[misc] + + __new__ = _lazy_new + + +from torch.storage import _LegacyStorage, _warn_typed_storage_removal + + +class _CudaLegacyStorage(_LegacyStorage): + @classmethod + def from_buffer(cls, *args, **kwargs): + _warn_typed_storage_removal() + raise RuntimeError("from_buffer: Not available for CUDA storage") + + @classmethod + def _new_with_weak_ptr(cls, *args, **kwargs): + raise RuntimeError("_new_with_weak_ptr: Not available for CUDA storage") + + @classmethod + def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None): + raise RuntimeError("_new_shared_filename: Not available for CUDA storage") + + +class ByteStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.uint8 + + +class DoubleStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.double + + +class FloatStorage(_CudaLegacyStorage): + 
@classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.float + + +class HalfStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.half + + +class LongStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.long + + +class IntStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.int + + +class ShortStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.short + + +class CharStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.int8 + + +class BoolStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.bool + + +class BFloat16Storage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.bfloat16 + + +class ComplexDoubleStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.cdouble + + +class ComplexFloatStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.cfloat + + +del _LegacyStorage +del _CudaLegacyStorage + +torch._storage_classes.add(DoubleStorage) +torch._storage_classes.add(FloatStorage) +torch._storage_classes.add(LongStorage) +torch._storage_classes.add(IntStorage) +torch._storage_classes.add(ShortStorage) +torch._storage_classes.add(CharStorage) +torch._storage_classes.add(ByteStorage) +torch._storage_classes.add(HalfStorage) +torch._storage_classes.add(BoolStorage) +torch._storage_classes.add(BFloat16Storage) +torch._storage_classes.add(ComplexDoubleStorage) +torch._storage_classes.add(ComplexFloatStorage) + + +class _WrappedTritonKernel: + """Just a simple wrapper to store some metadata for testing purposes.""" + + def __init__(self, kernel): + self.kernel = kernel + self.kernel_invoked = False + + def __call__(self, *args, **kwargs): + res = self.kernel(*args, **kwargs) + self.kernel_invoked = True + return res + + +def _register_triton_kernels(): + if torch._running_with_deploy(): + return + + @_WrappedTritonKernel + def kernel_impl(*args, **kwargs): + from torch.sparse._triton_ops import bsr_dense_mm + + return bsr_dense_mm(*args, skip_checks=True, **kwargs) + + @_WrappedTritonKernel + def addmm_kernel_impl(*args, **kwargs): + from torch.sparse._triton_ops import bsr_dense_addmm + + return bsr_dense_addmm(*args, skip_checks=True, **kwargs) + + has_triton = importlib.util.find_spec("triton") is not None + if has_triton: + torch._TritonLibrary.registerOp( + "_triton_bsr_dense_mm_out", + "_triton_bsr_dense_mm_out(Tensor bsr, Tensor dense, *, Tensor(a!) 
out) -> Tensor(a!)", + kernel_impl, + "SparseCsrCUDA", + ) + + torch._TritonLibrary.registerOp( + "_triton_bsr_dense_addmm_out", + ( + "_triton_bsr_dense_addmm_out(Tensor input, Tensor bsr, Tensor dense," + " *, Scalar beta, Scalar alpha, Tensor(a!) out) -> Tensor(a!)" + ), + addmm_kernel_impl, + "SparseCsrCUDA", + ) + + +_lazy_call(_register_triton_kernels) + + +from . import amp, jiterator, nvtx, profiler, sparse + +__all__ = [ + # Typed storage and tensors + "BFloat16Storage", + "BFloat16Tensor", + "BoolStorage", + "BoolTensor", + "ByteStorage", + "ByteTensor", + "CharStorage", + "CharTensor", + "ComplexDoubleStorage", + "ComplexFloatStorage", + "DoubleStorage", + "DoubleTensor", + "FloatStorage", + "FloatTensor", + "HalfStorage", + "HalfTensor", + "IntStorage", + "IntTensor", + "LongStorage", + "LongTensor", + "ShortStorage", + "ShortTensor", + "CUDAGraph", + "CudaError", + "DeferredCudaCallError", + "Event", + "ExternalStream", + "OutOfMemoryError", + "Stream", + "StreamContext", + "amp", + "caching_allocator_alloc", + "caching_allocator_delete", + "can_device_access_peer", + "check_error", + "cudaStatus", + "cudart", + "current_blas_handle", + "current_device", + "current_stream", + "default_generators", + "default_stream", + "device", + "device_count", + "device_of", + "empty_cache", + "get_allocator_backend", + "CUDAPluggableAllocator", + "change_current_allocator", + "get_arch_list", + "get_device_capability", + "get_device_name", + "get_device_properties", + "get_gencode_flags", + "get_rng_state", + "get_rng_state_all", + "get_sync_debug_mode", + "graph", + "graph_pool_handle", + "graphs", + "has_half", + "has_magma", + "init", + "initial_seed", + "ipc_collect", + "is_available", + "is_bf16_supported", + "is_current_stream_capturing", + "is_initialized", + "jiterator", + "list_gpu_processes", + "make_graphed_callables", + "manual_seed", + "manual_seed_all", + "max_memory_allocated", + "max_memory_cached", + "max_memory_reserved", + "mem_get_info", + "memory", + "memory_allocated", + "memory_cached", + "memory_reserved", + "memory_snapshot", + "memory_stats", + "memory_stats_as_nested_dict", + "memory_summary", + "memory_usage", + "temperature", + "power_draw", + "clock_rate", + "nccl", + "nvtx", + "profiler", + "random", + "reset_accumulated_memory_stats", + "reset_max_memory_allocated", + "reset_max_memory_cached", + "reset_peak_memory_stats", + "seed", + "seed_all", + "set_device", + "set_per_process_memory_fraction", + "set_rng_state", + "set_rng_state_all", + "set_stream", + "set_sync_debug_mode", + "sparse", + "stream", + "streams", + "synchronize", + "utilization", +] diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6f2419e06b4dbfe235a670141613b608ce88092 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4b34e07df3f2e1db8dead2e56955d6303d1cd7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6352b0ec84e25c5147739ae7509cf25cb4c021fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90621dcb1adcd3f111e210f4dceec6437c2c1123 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..360dd39e5acaefba29176d634d4ed40a9ac8e442 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7cd1166285117b92fb5e713c65966a27ca8e568 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1475b38ea261231da7edff22d3861d159d00ad40 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c278f42687ade1a3373af3afd140a03d9b5ae408 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fc78b207ee8a6c6924934b4f171ecbc93808a8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed55d3a52f7dbd67e329cd760230d69a8486001c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5879098c5d637e98c4bfb4b6157ccc7fe464e00d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd8c5e5eb3a5ec30b18bb339446c4acc0292125e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19e0fa8751d442d1cdc688b7e6c7ffa03d90e5ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0ca634bfedd4b053b791637af38a7127479a7df Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b527c8339303be63a7a81356c2d3ef8e0d12e922 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/__init__.py b/venv/lib/python3.10/site-packages/torch/cuda/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..003cca1f0cd4e910d0c042cf94501aa17a1bb2f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/amp/__init__.py @@ -0,0 +1,11 @@ +from .autocast_mode import autocast, custom_bwd, custom_fwd +from .common import amp_definitely_not_available +from .grad_scaler import GradScaler + +__all__ = [ + "amp_definitely_not_available", + "autocast", + "custom_bwd", + "custom_fwd", + "GradScaler", +] diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d714b6a55f8ed3e1f0fce429727f4fac0cab6e5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38101d138d14857c9229b3d93e50dd5d546ce689 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66da72558185b974fd4b52604fe68984ff54a8fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bba3f8a1c9f50fbdb2fc109e82f5dfb1cf05ca16 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py b/venv/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..88ff04d86648806a21b180ae79e6a58bf5b22685 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py @@ -0,0 +1,144 @@ +import collections +import functools + +import torch + +try: + import numpy as np + + HAS_NUMPY = True +except ModuleNotFoundError: + np = None # type: ignore[assignment] +from typing import Any + +__all__ = ["autocast", "custom_fwd", "custom_bwd"] + + +class autocast(torch.amp.autocast_mode.autocast): + r"""See :class:`torch.autocast`. + + ``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)`` + """ + + def __init__( + self, + enabled: bool = True, + dtype: torch.dtype = torch.float16, + cache_enabled: bool = True, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = "cuda" + self.fast_dtype = dtype + return + super().__init__( + "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled + ) + + def __enter__(self): + if torch._jit_internal.is_scripting(): + return self + return super().__enter__() + + # TODO: discuss a unified TorchScript-friendly API for autocast + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + return super().__exit__(exc_type, exc_val, exc_tb) + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return super().__call__(func) + + +# Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which +# may be falsely detected as "Iterables." +def _cast(value, dtype): + if isinstance(value, torch.Tensor): + is_eligible = ( + value.is_floating_point() + and value.is_cuda + and (value.dtype is not torch.float64) + ) + return value.to(dtype) if is_eligible else value + elif isinstance(value, (str, bytes)): + return value + elif HAS_NUMPY and isinstance(value, np.ndarray): + return value + elif isinstance(value, collections.abc.Mapping): + return {_cast(k, dtype): _cast(v, dtype) for k, v in value.items()} + elif isinstance(value, collections.abc.Iterable): + iterable = (_cast(v, dtype) for v in value) + if isinstance(value, (list, tuple)): + return type(value)(iterable) + else: + return iterable + else: + return value + + +# custom_fwd is a decorator that may or may not be used with arguments, following +# https://github.com/dabeaz/python-cookbook/tree/master/src/9/defining_a_decorator_that_takes_an_optional_argument. +# this works: +# @custom_fwd +# def forward(...): +# this also works: +# @custom_fwd(cast_inputs=torch.float) +# def forward(...): +def custom_fwd(fwd=None, *, cast_inputs=None): + """ + Create a helper decorator for ``forward`` methods of custom autograd functions. + + Autograd functions are subclasses of :class:`torch.autograd.Function`. + See the :ref:`example page` for more detail. 
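    A minimal illustrative sketch (``MyMM`` is a hypothetical autograd function,
    not something defined by this module)::

        class MyMM(torch.autograd.Function):
            @staticmethod
            @custom_fwd(cast_inputs=torch.float32)
            def forward(ctx, a, b):
                ctx.save_for_backward(a, b)
                return a.mm(b)

            @staticmethod
            @custom_bwd
            def backward(ctx, grad):
                a, b = ctx.saved_tensors
                return grad.mm(b.t()), a.t().mm(grad)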
+ + Args: + cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``, + when ``forward`` runs in an autocast-enabled region, casts incoming + floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected), + then executes ``forward`` with autocast disabled. + If ``None``, ``forward``'s internal ops execute with the current autocast state. + + .. note:: + If the decorated ``forward`` is called outside an autocast-enabled region, + :func:`custom_fwd` is a no-op and ``cast_inputs`` has no effect. + """ + if fwd is None: + return functools.partial(custom_fwd, cast_inputs=cast_inputs) + + @functools.wraps(fwd) + def decorate_fwd(*args, **kwargs): + args[0]._dtype = torch.get_autocast_gpu_dtype() + if cast_inputs is None: + args[0]._fwd_used_autocast = torch.is_autocast_enabled() + return fwd(*args, **kwargs) + else: + autocast_context = torch.is_autocast_enabled() + args[0]._fwd_used_autocast = False + if autocast_context: + with autocast(enabled=False): + return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs)) + else: + return fwd(*args, **kwargs) + + return decorate_fwd + + +# Autograd ensures incoming gradients are the same type as forward outputs. Allowing a separate +# cast_inputs argument on custom_bwd is unnecessary and could cause errors if it doesn't match +# cast_inputs supplied to custom_fwd. +def custom_bwd(bwd): + """Create a helper decorator for backward methods of custom autograd functions. + + Autograd functions are subclasses of :class:`torch.autograd.Function`. + Ensures that ``backward`` executes with the same autocast state as ``forward``. + See the :ref:`example page` for more detail. + """ + + @functools.wraps(bwd) + def decorate_bwd(*args, **kwargs): + with autocast(enabled=args[0]._fwd_used_autocast, dtype=args[0]._dtype): + return bwd(*args, **kwargs) + + return decorate_bwd diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/common.py b/venv/lib/python3.10/site-packages/torch/cuda/amp/common.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e8c1cc99b00d63672e12f2908a82c899076306 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/amp/common.py @@ -0,0 +1,9 @@ +from importlib.util import find_spec + +import torch + +__all__ = ["amp_definitely_not_available"] + + +def amp_definitely_not_available(): + return not (torch.cuda.is_available() or find_spec("torch_xla")) diff --git a/venv/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py b/venv/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebaa9bced2ca0a2758cc4211308b2ad51437833 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py @@ -0,0 +1,28 @@ +import torch +from torch.amp.grad_scaler import OptState + +__all__ = ["GradScaler", "OptState"] + + +class GradScaler(torch.amp.GradScaler): + r""" + See :class:`torch.amp.GradScaler`. 
+ ``torch.cuda.amp.GradScaler(args...)`` is equivalent to ``torch.amp.GradScaler("cuda", args...)`` + """ + + def __init__( + self, + init_scale: float = 2.0**16, + growth_factor: float = 2.0, + backoff_factor: float = 0.5, + growth_interval: int = 2000, + enabled: bool = True, + ) -> None: + super().__init__( + "cuda", + init_scale=init_scale, + growth_factor=growth_factor, + backoff_factor=backoff_factor, + growth_interval=growth_interval, + enabled=enabled, + ) diff --git a/venv/lib/python3.10/site-packages/torch/cuda/jiterator.py b/venv/lib/python3.10/site-packages/torch/cuda/jiterator.py new file mode 100644 index 0000000000000000000000000000000000000000..25d25482419e635612855ed402fd02ef58709417 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/jiterator.py @@ -0,0 +1,185 @@ +import re +from typing import Callable, List + +import torch +from torch import Tensor + +__all__: List[str] = [] + + +class _CodeParser: + def __init__(self, code_string: str): + optional_ws = r"\s*" + required_ws = r"\s+" + template_params = r"(?P<template_params>\<.+\>)" + return_type = r"(?P<return_type>\w+)" + function_name = r"(?P<function_name>\w+)" + function_params = r"(?P<function_params>\(.+\))" + function_body = r"(?P<function_body>\{.+\})" + + pattern = ( + optional_ws + + "template" + + optional_ws + + template_params + + optional_ws + + return_type + + required_ws + + function_name + + optional_ws + + function_params + + optional_ws + + function_body + + optional_ws + ) + + result = re.match( + pattern, code_string, re.DOTALL + ) # DOTALL for matching multiline + + if result is None: + raise Exception( + f"Couldn't parse code, please check correctness:\n {code_string}" + ) + + self.template_params = result["template_params"] + self.return_type = result["return_type"] + self.function_name = result["function_name"] + self.function_params = result["function_params"] + self.function_body = result["function_body"] + + +class _JittedFunction: + def __init__( + self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs + ): + self.code_string = code_string + + assert ( + return_by_ref or num_outputs == 1 + ), "Return by value only works for single output. " + self.return_by_ref = return_by_ref + self.num_outputs = num_outputs + + parsed_code = _CodeParser(code_string) + self.kernel_name = parsed_code.function_name + + self.kwargs_dict = kwargs + self.is_cuda_available = torch.cuda.is_available() + + def __call__(self, *tensors: Tensor, **kwargs): + # Jiterator follows torch.cuda's lazy initialization behavior + # Defer checking cuda's availability to the function invocation time + assert ( + self.is_cuda_available + ), "Jiterator is only supported on CUDA and ROCm GPUs, none are available." + + assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs." + + expanded_kwargs = self.kwargs_dict.copy() + for key, value in kwargs.items(): + if key in self.kwargs_dict: + expanded_kwargs[key] = value + else: + raise KeyError(f"{key} is not declared in function definition") + + return torch._C._cuda_jiterator_compile_and_launch_kernel( + self.code_string, + self.kernel_name, + self.return_by_ref, + self.num_outputs, + tensors, + expanded_kwargs, + ) + + +def _create_jit_fn(code_string: str, **kwargs) -> Callable: + """ + Create a jiterator-generated cuda kernel for an elementwise op. + + The code string has to be a valid CUDA function that describes the computation for a single element. The code + string has to follow the c++ template pattern, as shown in the example below.
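# --- Minimal end-to-end sketch of calling the private helper above. The docstring
# examples further down lost their "template <typename T>" prefixes to markup
# stripping; the intact form of the code string looks like this. A CUDA device is
# assumed, and the alpha values are arbitrary.
import torch
from torch.cuda.jiterator import _create_jit_fn

code_string = "template <typename T> T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }"
if torch.cuda.is_available():
    jitted_fn = _create_jit_fn(code_string, alpha=1.0)
    a = torch.rand(3, device="cuda")
    b = torch.rand(3, device="cuda")
    result = jitted_fn(a, b, alpha=3.14)   # call-site kwargs override the declared default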
This function will be inlined + into elementwise kernel template, and compiled on the fly. Compiled kernel will be cached in memory, as well as + local temp dir. + + Jiterator-generated kernels accepts noncontiguous tensors, and supports broadcasting and type promotion. + + Args: + code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value. + kwargs (Dict, optional): Keyword arguments for generated function + + Example:: + + code_string = "template T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }" + jitted_fn = create_jit_fn(code_string, alpha=1.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b, alpha=3.14) + + code_string also allows multiple function definitions, and the last function will be treated as the entry function. + + Example:: + + code_string = "template T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }" + code_string += "template T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }" + jitted_fn = create_jit_fn(code_string, val=0.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b) # using default val=0.0 + + Jiterator can be used together with python registration to override an operator's cuda kernel. + Following example is overriding gelu's cuda kernel with relu. + + Example:: + + code_string = "template T my_gelu(T a) { return a > 0 ? a : 0; }" + my_gelu = create_jit_fn(code_string) + my_lib = torch.library.Library("aten", "IMPL") + my_lib.impl('aten::gelu', my_gelu, "CUDA") + # torch.nn.GELU and torch.nn.function.gelu are now overridden + a = torch.rand(3, device='cuda') + torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a)) + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + This API only supports up to 8 inputs and 1 output + + .. warning:: + All input tensors must live in CUDA device + """ + return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs) + + +def _create_multi_output_jit_fn( + code_string: str, num_outputs: int, **kwargs +) -> Callable: + """ + Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs. + + Args: + code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference. + num_outputs(int): number of outputs return by the kernel + kwargs (Dict, optional): Keyword arguments for generated function + + Example:: + + code_string = "template void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }" + jitted_fn = create_jit_fn(code_string, alpha=1.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b, alpha=3.14) + + .. warning:: + This API is in beta and may change in future releases. + + .. 
warning:: + This API only supports up to 8 inputs and 8 outputs + """ + return _JittedFunction( + code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs + ) diff --git a/venv/lib/python3.10/site-packages/torch/cuda/memory.py b/venv/lib/python3.10/site-packages/torch/cuda/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..60440c58dc1d057b744fc91a6254757b74839225 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/memory.py @@ -0,0 +1,914 @@ +r"""This package adds support for device memory management implemented in CUDA.""" + +import collections +import contextlib +import ctypes +import pickle +import sys +import warnings +from inspect import signature + +from typing import Any, Dict, Optional, Tuple, Union + +import torch +from torch import _C + +from torch.types import Device +from .._utils import _dummy_type +from . import _get_device_index, _get_nvml_device_index, _lazy_init, is_initialized + +from ._memory_viz import memory as _memory, segments as _segments + +__all__ = [ + "caching_allocator_alloc", + "caching_allocator_delete", + "set_per_process_memory_fraction", + "empty_cache", + "memory_stats", + "memory_stats_as_nested_dict", + "reset_accumulated_memory_stats", + "reset_peak_memory_stats", + "reset_max_memory_allocated", + "reset_max_memory_cached", + "memory_allocated", + "max_memory_allocated", + "memory_reserved", + "max_memory_reserved", + "memory_cached", + "max_memory_cached", + "memory_snapshot", + "memory_summary", + "list_gpu_processes", + "mem_get_info", + "get_allocator_backend", + "CUDAPluggableAllocator", + "change_current_allocator", +] + + +if not hasattr(torch._C, "_cuda_CUDAAllocator"): + # Define dummy base classes + torch._C.__dict__["_cuda_CUDAAllocator"] = _dummy_type("_cuda_CUDAAllocator") + + +def _host_allocator(): + _lazy_init() + return torch._C._cuda_cudaHostAllocator() + + +@contextlib.contextmanager +def _free_mutex(): + torch._C._cuda_lock_mutex() + try: + yield + finally: + torch._C._cuda_unlock_mutex() + + +def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None): + r"""Perform a memory allocation using the CUDA memory allocator. + + Memory is allocated for a given device and a stream, this + function is intended to be used for interoperability with other + frameworks. Allocated memory is released through + :func:`~torch.cuda.caching_allocator_delete`. + + Args: + size (int): number of bytes to be allocated. + device (torch.device or int, optional): selected device. If it is + ``None`` the default CUDA device is used. + stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then + the default stream for the selected device is used. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + if stream is None: + stream = torch.cuda.current_stream(device) + if isinstance(stream, torch.cuda.streams.Stream): + stream = stream.cuda_stream + if not isinstance(stream, int): + raise TypeError( + "Invalid type for stream argument, must be " + "`torch.cuda.Stream` or `int` representing a pointer " + "to a existing stream" + ) + with torch.cuda.device(device): + return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream) + + +def caching_allocator_delete(mem_ptr): + r"""Delete memory allocated using the CUDA memory allocator. + + Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`. + is freed here. 
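# --- Illustrative sketch of the alloc/delete pair above, as it might be used for
# interop with another framework; the 1 MiB size is arbitrary and a CUDA device
# is assumed.
import torch

if torch.cuda.is_available():
    ptr = torch.cuda.caching_allocator_alloc(1 << 20)   # raw device pointer from the caching allocator
    # ... hand `ptr` to an external library that expects a CUDA device pointer ...
    torch.cuda.caching_allocator_delete(ptr)            # return the block to the allocator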
The associated device and stream are tracked inside + the allocator. + + Args: + mem_ptr (int): memory address to be freed by the allocator. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr) + + +def set_per_process_memory_fraction( + fraction, device: Union[Device, int] = None +) -> None: + r"""Set memory fraction for a process. + + The fraction is used to limit an caching allocator to allocated memory on a CUDA device. + The allowed value equals the total visible memory multiplied fraction. + If trying to allocate more than the allowed value in a process, will raise an out of + memory error in allocator. + + Args: + fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction. + device (torch.device or int, optional): selected device. If it is + ``None`` the default CUDA device is used. + .. note:: + In general, the total available free memory is less than the total capacity. + """ + _lazy_init() + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + if not isinstance(fraction, float): + raise TypeError("Invalid type for fraction argument, must be `float`") + if fraction < 0 or fraction > 1: + raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~1") + + torch._C._cuda_setMemoryFraction(fraction, device) + + +def empty_cache() -> None: + r"""Release all unoccupied cached memory currently held by the caching + allocator so that those can be used in other GPU application and visible in + `nvidia-smi`. + + .. note:: + :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU + memory available for PyTorch. However, it may help reduce fragmentation + of GPU memory in certain cases. See :ref:`cuda-memory-management` for + more details about GPU memory management. + """ + if is_initialized(): + torch._C._cuda_emptyCache() + + +def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]: + r"""Return a dictionary of CUDA memory allocator statistics for a given device. + + The return value of this function is a dictionary of statistics, each of + which is a non-negative integer. + + Core statistics: + + - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of allocation requests received by the memory allocator. + - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of allocated memory. + - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of reserved segments from ``cudaMalloc()``. + - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of reserved memory. + - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of active memory blocks. + - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of active memory. + - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of inactive, non-releasable memory blocks. + - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of inactive, non-releasable memory. + + For these core statistics, values are broken down as follows. + + Pool type: + + - ``all``: combined statistics across all memory pools. + - ``large_pool``: statistics for the large allocation pool + (as of October 2019, for size >= 1MB allocations). 
+ - ``small_pool``: statistics for the small allocation pool + (as of October 2019, for size < 1MB allocations). + + Metric type: + + - ``current``: current value of this metric. + - ``peak``: maximum value of this metric. + - ``allocated``: historical total increase in this metric. + - ``freed``: historical total decrease in this metric. + + In addition to the core statistics, we also provide some simple event + counters: + + - ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that + result in a cache flush and retry. + - ``"num_ooms"``: number of out-of-memory errors thrown. + + The caching allocator can be configured via ENV to not split blocks larger than a + defined size (see Memory Management section of the Cuda Semantics documentation). + This helps avoid memory fragmentation but may have a performance + penalty. Additional outputs to assist with tuning and evaluating impact: + + - ``"max_split_size"``: blocks above this size will not be split. + - ``"oversize_allocations.{current,peak,allocated,freed}"``: + number of over-size allocation requests received by the memory allocator. + - ``"oversize_segments.{current,peak,allocated,freed}"``: + number of over-size reserved segments from ``cudaMalloc()``. + + The caching allocator can be configured via ENV to round memory allocations in order + to reduce fragmentation. Sometimes the overhead from rounding can be higher than + the fragmentation it helps reduce. The following stat can be used to check if + rounding adds too much overhead: + + - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + memory requested by client code, compare this with allocated_bytes to check if + allocation rounding adds too much overhead. + + Args: + device (torch.device or int, optional): selected device. Returns + statistics for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + + .. note:: + With :ref:`backend:cudaMallocAsync`, some stats are not + meaningful, and are always reported as zero. + """ + result = [] + + def _recurse_add_to_result(prefix, obj): + if isinstance(obj, dict): + if len(prefix) > 0: + prefix += "." + for k, v in obj.items(): + _recurse_add_to_result(prefix + k, v) + else: + result.append((prefix, obj)) + + stats = memory_stats_as_nested_dict(device=device) + _recurse_add_to_result("", stats) + result.sort() + + return collections.OrderedDict(result) + + +def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]: + r"""Return the result of :func:`~torch.cuda.memory_stats` as a nested dictionary.""" + if not is_initialized(): + return {} + device = _get_device_index(device, optional=True) + return torch._C._cuda_memoryStats(device) + + +def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None: + r"""Reset the "accumulated" (historical) stats tracked by the CUDA memory allocator. + + See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to + the `"allocated"` and `"freed"` keys in each individual stat dict, as well as + `"num_alloc_retries"` and `"num_ooms"`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. 
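# --- Illustrative sketch of reading a few of the flattened counters documented
# above; keys follow the "stat.pool.metric" naming scheme from the memory_stats
# docstring. A CUDA device is assumed and the tensor size is arbitrary.
import torch

if torch.cuda.is_available():
    x = torch.empty(1024, 1024, device="cuda")
    stats = torch.cuda.memory_stats()
    print(stats["allocated_bytes.all.current"])           # bytes currently allocated
    print(stats["reserved_bytes.all.peak"])                # peak bytes reserved via cudaMalloc
    print(stats["num_alloc_retries"], stats["num_ooms"])   # simple event counters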
+ """ + device = _get_device_index(device, optional=True) + return torch._C._cuda_resetAccumulatedMemoryStats(device) + + +def reset_peak_memory_stats(device: Union[Device, int] = None) -> None: + r"""Reset the "peak" stats tracked by the CUDA memory allocator. + + See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the + `"peak"` key in each individual stat dict. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + return torch._C._cuda_resetPeakMemoryStats(device) + + +def reset_max_memory_allocated(device: Union[Device, int] = None) -> None: + r"""Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device. + + See :func:`~torch.cuda.max_memory_allocated` for details. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. warning:: + This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets + /all/ peak memory stats. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + warnings.warn( + "torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, " + "which resets /all/ peak memory stats.", + FutureWarning, + ) + return reset_peak_memory_stats(device=device) + + +def reset_max_memory_cached(device: Union[Device, int] = None) -> None: + r"""Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device. + + See :func:`~torch.cuda.max_memory_cached` for details. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. warning:: + This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets + /all/ peak memory stats. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + warnings.warn( + "torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, " + "which resets /all/ peak memory stats.", + FutureWarning, + ) + return reset_peak_memory_stats(device=device) + + +def memory_allocated(device: Union[Device, int] = None) -> int: + r"""Return the current GPU memory occupied by tensors in bytes for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + This is likely less than the amount shown in `nvidia-smi` since some + unused memory can be held by the caching allocator and some context + needs to be created on GPU. See :ref:`cuda-memory-management` for more + details about GPU memory management. + """ + return memory_stats(device=device).get("allocated_bytes.all.current", 0) + + +def max_memory_allocated(device: Union[Device, int] = None) -> int: + r"""Return the maximum GPU memory occupied by tensors in bytes for a given device. + + By default, this returns the peak allocated memory since the beginning of + this program. 
:func:`~torch.cuda.reset_peak_memory_stats` can be used to + reset the starting point in tracking this metric. For example, these two + functions can measure the peak allocated memory usage of each iteration in a + training loop. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("allocated_bytes.all.peak", 0) + + +def memory_reserved(device: Union[Device, int] = None) -> int: + r"""Return the current GPU memory managed by the caching allocator in bytes for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("reserved_bytes.all.current", 0) + + +def max_memory_reserved(device: Union[Device, int] = None) -> int: + r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device. + + By default, this returns the peak cached memory since the beginning of this + program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset + the starting point in tracking this metric. For example, these two functions + can measure the peak cached memory amount of each iteration in a training + loop. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("reserved_bytes.all.peak", 0) + + +def memory_cached(device: Union[Device, int] = None) -> int: + r"""Deprecated; see :func:`~torch.cuda.memory_reserved`.""" + warnings.warn( + "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved", + FutureWarning, + ) + return memory_reserved(device=device) + + +def max_memory_cached(device: Union[Device, int] = None) -> int: + r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`.""" + warnings.warn( + "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved", + FutureWarning, + ) + return max_memory_reserved(device=device) + + +def memory_snapshot(): + r"""Return a snapshot of the CUDA memory allocator state across all devices. + + Interpreting the output of this function requires familiarity with the + memory allocator internals. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return torch._C._cuda_memorySnapshot()["segments"] + + +def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str: + r"""Return a human-readable printout of the current memory allocator statistics for a given device. + + This can be useful to display periodically during training, or when + handling out-of-memory exceptions. + + Args: + device (torch.device or int, optional): selected device. Returns + printout for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). 
+ abbreviated (bool, optional): whether to return an abbreviated summary + (default: False). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + stats = memory_stats(device=device) + + def _format_size(sz, pref_sz): + prefixes = ["B ", "KiB", "MiB", "GiB", "TiB", "PiB"] + prefix = prefixes[0] + for new_prefix in prefixes[1:]: + if pref_sz < 768 * 1024: + break + prefix = new_prefix + sz //= 1024 + pref_sz /= 1024 + return f"{sz:6d} {prefix}" + + def _format_count(cnt, pref_cnt): + prefixes = [" ", "K", "M"] + prefix = prefixes[0] + for new_prefix in prefixes[1:]: + if pref_cnt < 750 * 1000: + break + prefix = new_prefix + cnt //= 1000 + pref_cnt /= 1000 + return f"{cnt:7d} {prefix} " + + metrics_to_display = [ + ("allocated_bytes", "Allocated memory", _format_size), + ("active_bytes", "Active memory", _format_size), + ("requested_bytes", "Requested memory", _format_size), + ("reserved_bytes", "GPU reserved memory", _format_size), + ("inactive_split_bytes", "Non-releasable memory", _format_size), + ("allocation", "Allocations", _format_count), + ("active", "Active allocs", _format_count), + ("segment", "GPU reserved segments", _format_count), + ("inactive_split", "Non-releasable allocs", _format_count), + ] + + lines = [] + lines.append("=" * 75) + lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ") + lines.append("-" * 75) + lines.append( + " {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} " + ) + lines.append("=" * 75) + lines.append( + " Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed " + ) + + for metric_key, metric_name, formatter in metrics_to_display: + lines.append("-" * 75) + submetrics = [("all", metric_name)] + if not abbreviated: + submetrics.append(("large_pool", " from large pool")) + submetrics.append(("small_pool", " from small pool")) + + current_prefval, peak_prefval, allocated_prefval, freed_prefval = ( + None, + None, + None, + None, + ) + + for submetric_key, submetric_name in submetrics: + prefix = metric_key + "." + submetric_key + "." + + current = stats[prefix + "current"] + peak = stats[prefix + "peak"] + allocated = stats[prefix + "allocated"] + freed = stats[prefix + "freed"] + + if current_prefval is None: + current_prefval = current + peak_prefval = peak + allocated_prefval = allocated + freed_prefval = freed + + lines.append( + " {:<21} | {} | {} | {} | {} ".format( + submetric_name, + formatter(current, current_prefval), + formatter(peak, peak_prefval), + formatter(allocated, allocated_prefval), + formatter(freed, freed_prefval), + ), + ) + + metrics_to_display = [ + ("oversize_allocations", "Oversize allocations", _format_count), + ("oversize_segments", "Oversize GPU segments", _format_count), + ] + + for metric_key, metric_name, formatter in metrics_to_display: + lines.append("-" * 75) + + prefix = metric_key + "." 
+ + current = stats[prefix + "current"] + peak = stats[prefix + "peak"] + allocated = stats[prefix + "allocated"] + freed = stats[prefix + "freed"] + + lines.append( + " {:<21} | {} | {} | {} | {} ".format( + metric_name, + formatter(current, current), + formatter(peak, peak), + formatter(allocated, allocated), + formatter(freed, freed), + ), + ) + + lines.append("=" * 75) + + fmt_dict = {"_": "", "device": device} + for k, v in stats.items(): + fmt_dict[k.replace(".", "-")] = v + return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n" + + +def list_gpu_processes(device: Union[Device, int] = None) -> str: + r"""Return a human-readable printout of the running processes and their GPU memory use for a given device. + + This can be useful to display periodically during training, or when + handling out-of-memory exceptions. + + Args: + device (torch.device or int, optional): selected device. Returns + printout for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + """ + try: + import pynvml # type: ignore[import] + except ModuleNotFoundError: + return "pynvml module not found, please install pynvml" + from pynvml import NVMLError_DriverNotLoaded + + try: + pynvml.nvmlInit() + except NVMLError_DriverNotLoaded: + return "cuda driver can't be loaded, is cuda enabled?" + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle) + lines = [] + lines.append(f"GPU:{device}") + if len(procs) == 0: + lines.append("no processes are running") + for p in procs: + mem = p.usedGpuMemory / (1024 * 1024) + lines.append(f"process {p.pid:>10d} uses {mem:>12.3f} MB GPU memory") + return "\n".join(lines) + + +def mem_get_info(device: Union[Device, int] = None) -> Tuple[int, int]: + r"""Return the global free and total GPU memory for a given device using cudaMemGetInfo. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more + details about GPU memory management. + """ + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + return torch.cuda.cudart().cudaMemGetInfo(device) + + +def _record_memory_history_legacy( + enabled: bool, + record_context=True, + trace_alloc_max_entries=1, + trace_alloc_record_context=False, + device: Union[Device, int] = None, + record_context_cpp=False, +): + _C._cuda_record_memory_history_legacy( + enabled, + record_context, + trace_alloc_max_entries, + trace_alloc_record_context, + record_context_cpp, + ) + + +def _record_memory_history(enabled="all", *args, **kwargs): + """Enable recording of stack traces associated with memory + allocations, so you can tell what allocated any piece of memory in + :func:`torch.cuda.memory._snapshot()`. + + In addition too keeping stack traces with each current allocation and free, + this will also enable recording of a history of all alloc/free events. + + Use :func:`torch.cuda.memory._snapshot()` to retrieve this information, + and the tools in `_memory_viz.py` to visualize snapshots. + + The Python trace collection is fast (2us per trace), so you may consider + enabling this on production jobs if you anticipate ever having to debug + memory issues. 
+ + C++ trace collection is also fast (~50ns/frame), which for many typical programs + works out to ~2us per trace, but can vary depending on stack depth. + + Args: + enabled (Literal[None, "state", "all"], optional): + `None`, disable recording memory history. + `"state"`, keep information for currenly allocated memory. + `"all"`, additionally keep a history of all alloc/free calls. + Defaults to "all". + context (Literal[None, "state", "alloc", "all"], optional): + `None`, Do not record any tracebacks. + `"state"`, Record tracebacks for currently allocated memory. + `"alloc"`, additionally keep tracebacks for alloc calls. + `"all"`, additionally keep tracebacks for free calls. + Defaults to "all". + stacks (Literal["python", "all"], optional): + `"python"`, include Python, TorchScript, and inductor frames in tracebacks + `"all"`, additionally include C++ frames + Defaults to "all". + max_entries (int, optional): Keep a maximum of `max_entries` + alloc/free events in the recorded history recorded. + """ + if isinstance(enabled, bool): + return _record_memory_history_legacy(enabled, *args, **kwargs) + else: + return _record_memory_history_impl(enabled, *args, **kwargs) + + +def _record_memory_history_impl( + enabled: Optional[str] = "all", + context: Optional[str] = "all", + stacks: str = "all", + max_entries: int = sys.maxsize, + device: Union[Device, int] = None, +): + _C._cuda_record_memory_history(enabled, context, stacks, max_entries) + + +_record_memory_history.__signature__ = signature(_record_memory_history_impl) # type: ignore[attr-defined] + + +def _snapshot(device: Union[Device, int] = None): + """Save a snapshot of CUDA memory state at the time it was called. + + The state is represented as a dictionary with the following structure. + + .. code-block:: python + + class Snapshot(TypedDict): + segments : List[Segment] + device_traces: List[List[TraceEntry]] + + class Segment(TypedDict): + # Segments are memory returned from a cudaMalloc call. + # The size of reserved memory is the sum of all Segments. + # Segments are cached and reused for future allocations. + # If the reuse is smaller than the segment, the segment + # is split into more then one Block. + # empty_cache() frees Segments that are entirely inactive. + address: int + total_size: int # cudaMalloc'd size of segment + stream: int + segment_type: Literal['small', 'large'] # 'large' (>1MB) + allocated_size: int # size of memory in use + active_size: int # size of memory in use or in active_awaiting_free state + blocks : List[Block] + + class Block(TypedDict): + # A piece of memory returned from the allocator, or + # current cached but inactive. + size: int + requested_size: int # size requested during malloc, may be smaller than + # size due to rounding + address: int + state: Literal['active_allocated', # used by a tensor + 'active_awaiting_free', # waiting for another stream to finish using + # this, then it will become free + 'inactive',] # free for reuse + frames: List[Frame] # stack trace from where the allocation occurred + + class Frame(TypedDict): + filename: str + line: int + name: str + + class TraceEntry(TypedDict): + # When `torch.cuda.memory._record_memory_history()` is enabled, + # the snapshot will contain TraceEntry objects that record each + # action the allocator took. 
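# --- Illustrative sketch of walking the Snapshot/Segment/Block structure
# documented above; a CUDA device is assumed and the tensor is only there to
# populate the allocator.
import torch

if torch.cuda.is_available():
    x = torch.randn(1024, 1024, device="cuda")
    snap = torch.cuda.memory._snapshot()
    for seg in snap["segments"]:
        active = sum(b["size"] for b in seg["blocks"] if b["state"] == "active_allocated")
        print(f"segment @ {seg['address']:#x}: total {seg['total_size']} B, active {active} B")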
+ action: Literal[ + 'alloc' # memory allocated + 'free_requested', # the allocated received a call to free memory + 'free_completed', # the memory that was requested to be freed is now + # able to be used in future allocation calls + 'segment_alloc', # the caching allocator ask cudaMalloc for more memory + # and added it as a segment in its cache + 'segment_free', # the caching allocator called cudaFree to return memory + # to cuda possibly trying free up memory to + # allocate more segments or because empty_caches was called + 'oom', # the allocator threw an OOM exception. 'size' is + # the requested number of bytes that did not succeed + 'snapshot' # the allocator generated a memory snapshot + # useful to coorelate a previously taken + # snapshot with this trace + ] + addr: int # not present for OOM + frames: List[Frame] + size: int + stream: int + device_free: int # only present for OOM, the amount of + # memory cuda still reports to be free + + Returns: + The Snapshot dictionary object + """ + return _C._cuda_memorySnapshot() + + +def _dump_snapshot(filename="dump_snapshot.pickle"): + """ + Save a pickled version of the `torch.memory._snapshot()` dictionary to a file. + + This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz + + Args: + filename (str, optional): Name of the file to create. Defaults to "dump_snapshot.pickle". + """ + s = _snapshot() + with open(filename, "wb") as f: + pickle.dump(s, f) + + +def _save_segment_usage(filename="output.svg", snapshot=None): + if snapshot is None: + snapshot = _snapshot() + with open(filename, "w") as f: + f.write(_segments(snapshot)) + + +def _save_memory_usage(filename="output.svg", snapshot=None): + if snapshot is None: + snapshot = _snapshot() + with open(filename, "w") as f: + f.write(_memory(snapshot)) + + +def _set_allocator_settings(env: str): + return torch._C._cuda_cudaCachingAllocator_set_allocator_settings(env) + + +def get_allocator_backend() -> str: + r"""Return a string describing the active allocator backend as set by + ``PYTORCH_CUDA_ALLOC_CONF``. Currently available backends are + ``native`` (PyTorch's native caching allocator) and `cudaMallocAsync`` + (CUDA's built-in asynchronous allocator). + + .. note:: + See :ref:`cuda-memory-management` for details on choosing the allocator backend. + """ + return torch._C._cuda_getAllocatorBackend() + + +class _CUDAAllocator: + r"""Wrapper over internal CUDA memory allocators.""" + + def __init__(self, allocator: torch._C._cuda_CUDAAllocator): + self._allocator = allocator + + def allocator(self): + return self._allocator + + +class CUDAPluggableAllocator(_CUDAAllocator): + r"""CUDA memory allocator loaded from a so file.""" + + def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str): + r"""Memory allocators are compiled in .so files and loaded dynamically using ctypes. + + To change the active allocator use the :func:`torch.memory.cuda.change_current_allocator` function. + + Args: + path_to_so_file(str): Path in the filesystem to the `.so` file containing + the allocator functions + alloc_fn_name(str): Name of the function to perform the memory allocation + in the so file. The signature must be: + void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream); + free_fn_name(str): Name of the function to perform the memory release + in the so file. The signature must be: + void free_fn_name(void* ptr, size_t size, cudaStream_t stream); + + .. warning:: + This is currently supported only in unix OSs + + .. 
note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + allocator = ctypes.CDLL(path_to_so_file) + alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value + free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value + assert alloc_fn is not None + assert free_fn is not None + self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn) + + +def change_current_allocator(allocator: _CUDAAllocator) -> None: + r"""Change the currently used memory allocator to be the one provided. + + If the current allocator has already been used/initialized, this function will error. + + + Args: + allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one. + .. note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + torch._C._cuda_changeCurrentAllocator(allocator.allocator()) + + +def _get_current_allocator() -> _CUDAAllocator: + r"""Return the allocator being currently used. + + .. note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + return _CUDAAllocator(torch._C._cuda_getAllocator()) diff --git a/venv/lib/python3.10/site-packages/torch/cuda/profiler.py b/venv/lib/python3.10/site-packages/torch/cuda/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..51c8aa46f714b6a9fd30857c9edb575614d52420 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/cuda/profiler.py @@ -0,0 +1,61 @@ +import contextlib +import tempfile + +import torch +from . import check_error, cudart + +__all__ = ["init", "start", "stop", "profile"] + +DEFAULT_FLAGS = [ + "gpustarttimestamp", + "gpuendtimestamp", + "gridsize3d", + "threadblocksize", + "streamid", + "enableonstart 0", + "conckerneltrace", +] + + +def init(output_file, flags=None, output_mode="key_value"): + rt = cudart() + if not hasattr(rt, "cudaOutputMode"): + raise AssertionError("HIP does not support profiler initialization!") + if ( + hasattr(torch.version, "cuda") + and torch.version.cuda is not None + and int(torch.version.cuda.split(".")[0]) >= 12 + ): + # Check https://github.com/pytorch/pytorch/pull/91118 + # cudaProfilerInitialize is no longer needed after CUDA 12 + raise AssertionError("CUDA12+ does not need profiler initialization!") + flags = DEFAULT_FLAGS if flags is None else flags + if output_mode == "key_value": + output_mode_enum = rt.cudaOutputMode.KeyValuePair + elif output_mode == "csv": + output_mode_enum = rt.cudaOutputMode.CSV + else: + raise RuntimeError( + "supported CUDA profiler output modes are: key_value and csv" + ) + with tempfile.NamedTemporaryFile(delete=True) as f: + f.write(b"\n".join(f.encode("ascii") for f in flags)) + f.flush() + check_error(rt.cudaProfilerInitialize(f.name, output_file, output_mode_enum)) + + +def start(): + check_error(cudart().cudaProfilerStart()) + + +def stop(): + check_error(cudart().cudaProfilerStop()) + + +@contextlib.contextmanager +def profile(): + try: + start() + yield + finally: + stop() diff --git a/venv/lib/python3.10/site-packages/torch/include/clog.h b/venv/lib/python3.10/site-packages/torch/include/clog.h new file mode 100644 index 0000000000000000000000000000000000000000..bec164caaabd0cd89b60afe128cb5e0f736452e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/clog.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#define CLOG_NONE 0 +#define CLOG_FATAL 1 +#define CLOG_ERROR 2 +#define CLOG_WARNING 3 +#define CLOG_INFO 4 +#define CLOG_DEBUG 5 + +#ifndef CLOG_VISIBILITY + #if defined(__ELF__) + #define CLOG_VISIBILITY __attribute__((__visibility__("internal"))) + #elif defined(__MACH__) + #define CLOG_VISIBILITY __attribute__((__visibility__("hidden"))) + #else + #define CLOG_VISIBILITY + #endif +#endif + +#ifndef CLOG_ARGUMENTS_FORMAT + #if defined(__GNUC__) + #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2))) + #else + #define CLOG_ARGUMENTS_FORMAT + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args); + +#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_debug_function_name(const char* format, ...) { \ + if (level >= CLOG_DEBUG) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_debug(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_info_function_name(const char* format, ...) { \ + if (level >= CLOG_INFO) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_info(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_warning_function_name(const char* format, ...) { \ + if (level >= CLOG_WARNING) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_warning(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_error_function_name(const char* format, ...) { \ + if (level >= CLOG_ERROR) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_error(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_fatal_function_name(const char* format, ...) 
{ \ + if (level >= CLOG_FATAL) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_fatal(module, format, args); \ + va_end(args); \ + } \ + abort(); \ + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/cpuinfo.h b/venv/lib/python3.10/site-packages/torch/include/cpuinfo.h new file mode 100644 index 0000000000000000000000000000000000000000..dfb535f1c9e25d133e98253370c917a306c57119 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/cpuinfo.h @@ -0,0 +1,1956 @@ +#pragma once +#ifndef CPUINFO_H +#define CPUINFO_H + +#ifndef __cplusplus + #include +#endif + +#ifdef __APPLE__ + #include +#endif + +#include + +/* Identify architecture and define corresponding macro */ + +#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) + #define CPUINFO_ARCH_X86 1 +#endif + +#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) + #define CPUINFO_ARCH_X86_64 1 +#endif + +#if defined(__arm__) || defined(_M_ARM) + #define CPUINFO_ARCH_ARM 1 +#endif + +#if defined(__aarch64__) || defined(_M_ARM64) + #define CPUINFO_ARCH_ARM64 1 +#endif + +#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) + #define CPUINFO_ARCH_PPC64 1 +#endif + +#if defined(__asmjs__) + #define CPUINFO_ARCH_ASMJS 1 +#endif + +#if defined(__wasm__) + #if defined(__wasm_simd128__) + #define CPUINFO_ARCH_WASMSIMD 1 + #else + #define CPUINFO_ARCH_WASM 1 + #endif +#endif + +/* Define other architecture-specific macros as 0 */ + +#ifndef CPUINFO_ARCH_X86 + #define CPUINFO_ARCH_X86 0 +#endif + +#ifndef CPUINFO_ARCH_X86_64 + #define CPUINFO_ARCH_X86_64 0 +#endif + +#ifndef CPUINFO_ARCH_ARM + #define CPUINFO_ARCH_ARM 0 +#endif + +#ifndef CPUINFO_ARCH_ARM64 + #define CPUINFO_ARCH_ARM64 0 +#endif + +#ifndef CPUINFO_ARCH_PPC64 + #define CPUINFO_ARCH_PPC64 0 +#endif + +#ifndef CPUINFO_ARCH_ASMJS + #define CPUINFO_ARCH_ASMJS 0 +#endif + +#ifndef CPUINFO_ARCH_WASM + #define CPUINFO_ARCH_WASM 0 +#endif + +#ifndef CPUINFO_ARCH_WASMSIMD + #define CPUINFO_ARCH_WASMSIMD 0 +#endif + +#if CPUINFO_ARCH_X86 && defined(_MSC_VER) + #define CPUINFO_ABI __cdecl +#elif CPUINFO_ARCH_X86 && defined(__GNUC__) + #define CPUINFO_ABI __attribute__((__cdecl__)) +#else + #define CPUINFO_ABI +#endif + +#define CPUINFO_CACHE_UNIFIED 0x00000001 +#define CPUINFO_CACHE_INCLUSIVE 0x00000002 +#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004 + +struct cpuinfo_cache { + /** Cache size in bytes */ + uint32_t size; + /** Number of ways of associativity */ + uint32_t associativity; + /** Number of sets */ + uint32_t sets; + /** Number of partitions */ + uint32_t partitions; + /** Line size in bytes */ + uint32_t line_size; + /** + * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing). 
+ * + * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING + */ + uint32_t flags; + /** Index of the first logical processor that shares this cache */ + uint32_t processor_start; + /** Number of logical processors that share this cache */ + uint32_t processor_count; +}; + +struct cpuinfo_trace_cache { + uint32_t uops; + uint32_t associativity; +}; + +#define CPUINFO_PAGE_SIZE_4KB 0x1000 +#define CPUINFO_PAGE_SIZE_1MB 0x100000 +#define CPUINFO_PAGE_SIZE_2MB 0x200000 +#define CPUINFO_PAGE_SIZE_4MB 0x400000 +#define CPUINFO_PAGE_SIZE_16MB 0x1000000 +#define CPUINFO_PAGE_SIZE_1GB 0x40000000 + +struct cpuinfo_tlb { + uint32_t entries; + uint32_t associativity; + uint64_t pages; +}; + +/** Vendor of processor core design */ +enum cpuinfo_vendor { + /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */ + cpuinfo_vendor_unknown = 0, + + /* Active vendors of modern CPUs */ + + /** + * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures. + * + * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004. + */ + cpuinfo_vendor_intel = 1, + /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */ + cpuinfo_vendor_amd = 2, + /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_arm = 3, + /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_qualcomm = 4, + /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_apple = 5, + /** Samsung Electronics Co., Ltd. Vendir if ARM64 processor microarchitectures. */ + cpuinfo_vendor_samsung = 6, + /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */ + cpuinfo_vendor_nvidia = 7, + /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_mips = 8, + /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */ + cpuinfo_vendor_ibm = 9, + /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_ingenic = 10, + /** + * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures. + * + * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies. + */ + cpuinfo_vendor_via = 11, + /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_cavium = 12, + /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_broadcom = 13, + /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_apm = 14, + /** + * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures. + * + * Processors are designed by HiSilicon, a subsidiary of Huawei. + */ + cpuinfo_vendor_huawei = 15, + /** + * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor of x86-64 processor microarchitectures. + * + * Processors are variants of AMD cores. + */ + cpuinfo_vendor_hygon = 16, + + /* Active vendors of embedded CPUs */ + + /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_texas_instruments = 30, + /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_marvell = 31, + /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_rdc = 32, + /** DM&P Electronics Inc. 
Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_dmp = 33, + /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */ + cpuinfo_vendor_motorola = 34, + + /* Defunct CPU vendors */ + + /** + * Transmeta Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 2004. + * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code. + */ + cpuinfo_vendor_transmeta = 50, + /** + * Cyrix Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1996. + */ + cpuinfo_vendor_cyrix = 51, + /** + * Rise Technology. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1999. + */ + cpuinfo_vendor_rise = 52, + /** + * National Semiconductor. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998. + */ + cpuinfo_vendor_nsc = 53, + /** + * Silicon Integrated Systems. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001. + */ + cpuinfo_vendor_sis = 54, + /** + * NexGen. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1994. + * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations. + */ + cpuinfo_vendor_nexgen = 55, + /** + * United Microelectronics Corporation. Vendor of x86 processor microarchitectures. + * + * Ceased x86 in the early 1990s. The last processor design was released in 1991. + * Designed U5C and U5D processors. Both are 486 level. + */ + cpuinfo_vendor_umc = 56, + /** + * Digital Equipment Corporation. Vendor of ARM processor microarchitecture. + * + * Sold its ARM designs in 1997. The last processor design was released in 1997. + */ + cpuinfo_vendor_dec = 57, +}; + +/** + * Processor microarchitecture + * + * Processors with different microarchitectures often have different instruction performance characteristics, + * and may have dramatically different pipeline organization. + */ +enum cpuinfo_uarch { + /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from OS */ + cpuinfo_uarch_unknown = 0, + + /** Pentium and Pentium MMX microarchitecture. */ + cpuinfo_uarch_p5 = 0x00100100, + /** Intel Quark microarchitecture. */ + cpuinfo_uarch_quark = 0x00100101, + + /** Pentium Pro, Pentium II, and Pentium III. */ + cpuinfo_uarch_p6 = 0x00100200, + /** Pentium M. */ + cpuinfo_uarch_dothan = 0x00100201, + /** Intel Core microarchitecture. */ + cpuinfo_uarch_yonah = 0x00100202, + /** Intel Core 2 microarchitecture on 65 nm process. */ + cpuinfo_uarch_conroe = 0x00100203, + /** Intel Core 2 microarchitecture on 45 nm process. */ + cpuinfo_uarch_penryn = 0x00100204, + /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */ + cpuinfo_uarch_nehalem = 0x00100205, + /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */ + cpuinfo_uarch_sandy_bridge = 0x00100206, + /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */ + cpuinfo_uarch_ivy_bridge = 0x00100207, + /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */ + cpuinfo_uarch_haswell = 0x00100208, + /** Intel Broadwell microarchitecture. 
*/ + cpuinfo_uarch_broadwell = 0x00100209, + /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */ + cpuinfo_uarch_sky_lake = 0x0010020A, + /** DEPRECATED (Intel Kaby Lake microarchitecture). */ + cpuinfo_uarch_kaby_lake = 0x0010020A, + /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */ + cpuinfo_uarch_palm_cove = 0x0010020B, + /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */ + cpuinfo_uarch_sunny_cove = 0x0010020C, + + /** Pentium 4 with Willamette, Northwood, or Foster cores. */ + cpuinfo_uarch_willamette = 0x00100300, + /** Pentium 4 with Prescott and later cores. */ + cpuinfo_uarch_prescott = 0x00100301, + + /** Intel Atom on 45 nm process. */ + cpuinfo_uarch_bonnell = 0x00100400, + /** Intel Atom on 32 nm process. */ + cpuinfo_uarch_saltwell = 0x00100401, + /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */ + cpuinfo_uarch_silvermont = 0x00100402, + /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */ + cpuinfo_uarch_airmont = 0x00100403, + /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */ + cpuinfo_uarch_goldmont = 0x00100404, + /** Intel Goldmont Plus microarchitecture (Gemini Lake). */ + cpuinfo_uarch_goldmont_plus = 0x00100405, + + /** Intel Knights Ferry HPC boards. */ + cpuinfo_uarch_knights_ferry = 0x00100500, + /** Intel Knights Corner HPC boards (aka Xeon Phi). */ + cpuinfo_uarch_knights_corner = 0x00100501, + /** Intel Knights Landing microarchitecture (second-gen MIC). */ + cpuinfo_uarch_knights_landing = 0x00100502, + /** Intel Knights Hill microarchitecture (third-gen MIC). */ + cpuinfo_uarch_knights_hill = 0x00100503, + /** Intel Knights Mill Xeon Phi. */ + cpuinfo_uarch_knights_mill = 0x00100504, + + /** Intel/Marvell XScale series. */ + cpuinfo_uarch_xscale = 0x00100600, + + /** AMD K5. */ + cpuinfo_uarch_k5 = 0x00200100, + /** AMD K6 and alike. */ + cpuinfo_uarch_k6 = 0x00200101, + /** AMD Athlon and Duron. */ + cpuinfo_uarch_k7 = 0x00200102, + /** AMD Athlon 64, Opteron 64. */ + cpuinfo_uarch_k8 = 0x00200103, + /** AMD Family 10h (Barcelona, Istambul, Magny-Cours). */ + cpuinfo_uarch_k10 = 0x00200104, + /** + * AMD Bulldozer microarchitecture + * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs. + */ + cpuinfo_uarch_bulldozer = 0x00200105, + /** + * AMD Piledriver microarchitecture + * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs. + */ + cpuinfo_uarch_piledriver = 0x00200106, + /** AMD Steamroller microarchitecture (Kaveri APUs). */ + cpuinfo_uarch_steamroller = 0x00200107, + /** AMD Excavator microarchitecture (Carizzo APUs). */ + cpuinfo_uarch_excavator = 0x00200108, + /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen = 0x00200109, + /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen2 = 0x0020010A, + /** AMD Zen 3 microarchitecture. */ + cpuinfo_uarch_zen3 = 0x0020010B, + /** AMD Zen 4 microarchitecture. */ + cpuinfo_uarch_zen4 = 0x0020010C, + + /** NSC Geode and AMD Geode GX and LX. */ + cpuinfo_uarch_geode = 0x00200200, + /** AMD Bobcat mobile microarchitecture. */ + cpuinfo_uarch_bobcat = 0x00200201, + /** AMD Jaguar mobile microarchitecture. */ + cpuinfo_uarch_jaguar = 0x00200202, + /** AMD Puma mobile microarchitecture. */ + cpuinfo_uarch_puma = 0x00200203, + + /** ARM7 series. */ + cpuinfo_uarch_arm7 = 0x00300100, + /** ARM9 series. 
*/ + cpuinfo_uarch_arm9 = 0x00300101, + /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */ + cpuinfo_uarch_arm11 = 0x00300102, + + /** ARM Cortex-A5. */ + cpuinfo_uarch_cortex_a5 = 0x00300205, + /** ARM Cortex-A7. */ + cpuinfo_uarch_cortex_a7 = 0x00300207, + /** ARM Cortex-A8. */ + cpuinfo_uarch_cortex_a8 = 0x00300208, + /** ARM Cortex-A9. */ + cpuinfo_uarch_cortex_a9 = 0x00300209, + /** ARM Cortex-A12. */ + cpuinfo_uarch_cortex_a12 = 0x00300212, + /** ARM Cortex-A15. */ + cpuinfo_uarch_cortex_a15 = 0x00300215, + /** ARM Cortex-A17. */ + cpuinfo_uarch_cortex_a17 = 0x00300217, + + /** ARM Cortex-A32. */ + cpuinfo_uarch_cortex_a32 = 0x00300332, + /** ARM Cortex-A35. */ + cpuinfo_uarch_cortex_a35 = 0x00300335, + /** ARM Cortex-A53. */ + cpuinfo_uarch_cortex_a53 = 0x00300353, + /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */ + cpuinfo_uarch_cortex_a55r0 = 0x00300354, + /** ARM Cortex-A55. */ + cpuinfo_uarch_cortex_a55 = 0x00300355, + /** ARM Cortex-A57. */ + cpuinfo_uarch_cortex_a57 = 0x00300357, + /** ARM Cortex-A65. */ + cpuinfo_uarch_cortex_a65 = 0x00300365, + /** ARM Cortex-A72. */ + cpuinfo_uarch_cortex_a72 = 0x00300372, + /** ARM Cortex-A73. */ + cpuinfo_uarch_cortex_a73 = 0x00300373, + /** ARM Cortex-A75. */ + cpuinfo_uarch_cortex_a75 = 0x00300375, + /** ARM Cortex-A76. */ + cpuinfo_uarch_cortex_a76 = 0x00300376, + /** ARM Cortex-A77. */ + cpuinfo_uarch_cortex_a77 = 0x00300377, + /** ARM Cortex-A78. */ + cpuinfo_uarch_cortex_a78 = 0x00300378, + + /** ARM Neoverse N1. */ + cpuinfo_uarch_neoverse_n1 = 0x00300400, + /** ARM Neoverse E1. */ + cpuinfo_uarch_neoverse_e1 = 0x00300401, + /** ARM Neoverse V1. */ + cpuinfo_uarch_neoverse_v1 = 0x00300402, + /** ARM Neoverse N2. */ + cpuinfo_uarch_neoverse_n2 = 0x00300403, + /** ARM Neoverse V2. */ + cpuinfo_uarch_neoverse_v2 = 0x00300404, + + /** ARM Cortex-X1. */ + cpuinfo_uarch_cortex_x1 = 0x00300501, + /** ARM Cortex-X2. */ + cpuinfo_uarch_cortex_x2 = 0x00300502, + /** ARM Cortex-X3. */ + cpuinfo_uarch_cortex_x3 = 0x00300503, + + /** ARM Cortex-A510. */ + cpuinfo_uarch_cortex_a510 = 0x00300551, + /** ARM Cortex-A710. */ + cpuinfo_uarch_cortex_a710 = 0x00300571, + /** ARM Cortex-A715. */ + cpuinfo_uarch_cortex_a715 = 0x00300572, + + /** Qualcomm Scorpion. */ + cpuinfo_uarch_scorpion = 0x00400100, + /** Qualcomm Krait. */ + cpuinfo_uarch_krait = 0x00400101, + /** Qualcomm Kryo. */ + cpuinfo_uarch_kryo = 0x00400102, + /** Qualcomm Falkor. */ + cpuinfo_uarch_falkor = 0x00400103, + /** Qualcomm Saphira. */ + cpuinfo_uarch_saphira = 0x00400104, + + /** Nvidia Denver. */ + cpuinfo_uarch_denver = 0x00500100, + /** Nvidia Denver 2. */ + cpuinfo_uarch_denver2 = 0x00500101, + /** Nvidia Carmel. */ + cpuinfo_uarch_carmel = 0x00500102, + + /** Samsung Exynos M1 (Exynos 8890 big cores). */ + cpuinfo_uarch_exynos_m1 = 0x00600100, + /** Samsung Exynos M2 (Exynos 8895 big cores). */ + cpuinfo_uarch_exynos_m2 = 0x00600101, + /** Samsung Exynos M3 (Exynos 9810 big cores). */ + cpuinfo_uarch_exynos_m3 = 0x00600102, + /** Samsung Exynos M4 (Exynos 9820 big cores). */ + cpuinfo_uarch_exynos_m4 = 0x00600103, + /** Samsung Exynos M5 (Exynos 9830 big cores). */ + cpuinfo_uarch_exynos_m5 = 0x00600104, + + /* Deprecated synonym for Cortex-A76 */ + cpuinfo_uarch_cortex_a76ae = 0x00300376, + /* Deprecated names for Exynos. 
*/ + cpuinfo_uarch_mongoose_m1 = 0x00600100, + cpuinfo_uarch_mongoose_m2 = 0x00600101, + cpuinfo_uarch_meerkat_m3 = 0x00600102, + cpuinfo_uarch_meerkat_m4 = 0x00600103, + + /** Apple A6 and A6X processors. */ + cpuinfo_uarch_swift = 0x00700100, + /** Apple A7 processor. */ + cpuinfo_uarch_cyclone = 0x00700101, + /** Apple A8 and A8X processors. */ + cpuinfo_uarch_typhoon = 0x00700102, + /** Apple A9 and A9X processors. */ + cpuinfo_uarch_twister = 0x00700103, + /** Apple A10 and A10X processors. */ + cpuinfo_uarch_hurricane = 0x00700104, + /** Apple A11 processor (big cores). */ + cpuinfo_uarch_monsoon = 0x00700105, + /** Apple A11 processor (little cores). */ + cpuinfo_uarch_mistral = 0x00700106, + /** Apple A12 processor (big cores). */ + cpuinfo_uarch_vortex = 0x00700107, + /** Apple A12 processor (little cores). */ + cpuinfo_uarch_tempest = 0x00700108, + /** Apple A13 processor (big cores). */ + cpuinfo_uarch_lightning = 0x00700109, + /** Apple A13 processor (little cores). */ + cpuinfo_uarch_thunder = 0x0070010A, + /** Apple A14 / M1 processor (big cores). */ + cpuinfo_uarch_firestorm = 0x0070010B, + /** Apple A14 / M1 processor (little cores). */ + cpuinfo_uarch_icestorm = 0x0070010C, + /** Apple A15 / M2 processor (big cores). */ + cpuinfo_uarch_avalanche = 0x0070010D, + /** Apple A15 / M2 processor (little cores). */ + cpuinfo_uarch_blizzard = 0x0070010E, + + /** Cavium ThunderX. */ + cpuinfo_uarch_thunderx = 0x00800100, + /** Cavium ThunderX2 (originally Broadcom Vulcan). */ + cpuinfo_uarch_thunderx2 = 0x00800200, + + /** Marvell PJ4. */ + cpuinfo_uarch_pj4 = 0x00900100, + + /** Broadcom Brahma B15. */ + cpuinfo_uarch_brahma_b15 = 0x00A00100, + /** Broadcom Brahma B53. */ + cpuinfo_uarch_brahma_b53 = 0x00A00101, + + /** Applied Micro X-Gene. */ + cpuinfo_uarch_xgene = 0x00B00100, + + /* Hygon Dhyana (a modification of AMD Zen for the Chinese market). */ + cpuinfo_uarch_dhyana = 0x01000100, + + /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */ + cpuinfo_uarch_taishan_v110 = 0x00C00100, +}; + +struct cpuinfo_processor { + /** SMT (hyperthread) ID within a core */ + uint32_t smt_id; + /** Core containing this logical processor */ + const struct cpuinfo_core* core; + /** Cluster of cores containing this logical processor */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this logical processor */ + const struct cpuinfo_package* package; +#if defined(__linux__) + /** + * Linux-specific ID for the logical processor: + * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu<linux_id>/ + * - Bit <linux_id> in the cpu_set_t identifies this logical processor + */ + int linux_id; +#endif +#if defined(_WIN32) || defined(__CYGWIN__) + /** Windows-specific ID for the group containing the logical processor. */ + uint16_t windows_group_id; + /** + * Windows-specific ID of the logical processor within its group: + * - Bit <windows_processor_id> in the KAFFINITY mask identifies this logical processor within its group.
+ */ + uint16_t windows_processor_id; +#endif +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** APIC ID (unique x86-specific ID of the logical processor) */ + uint32_t apic_id; +#endif + struct { + /** Level 1 instruction cache */ + const struct cpuinfo_cache* l1i; + /** Level 1 data cache */ + const struct cpuinfo_cache* l1d; + /** Level 2 unified or data cache */ + const struct cpuinfo_cache* l2; + /** Level 3 unified or data cache */ + const struct cpuinfo_cache* l3; + /** Level 4 unified or data cache */ + const struct cpuinfo_cache* l4; + } cache; +}; + +struct cpuinfo_core { + /** Index of the first logical processor on this core. */ + uint32_t processor_start; + /** Number of logical processors on this core */ + uint32_t processor_count; + /** Core ID within a package */ + uint32_t core_id; + /** Cluster containing this core */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this core. */ + const struct cpuinfo_package* package; + /** Vendor of the CPU microarchitecture for this core */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture for this core */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for this core */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for this core */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the core, in Hz */ + uint64_t frequency; +}; + +struct cpuinfo_cluster { + /** Index of the first logical processor in the cluster */ + uint32_t processor_start; + /** Number of logical processors in the cluster */ + uint32_t processor_count; + /** Index of the first core in the cluster */ + uint32_t core_start; + /** Number of cores on the cluster */ + uint32_t core_count; + /** Cluster ID within a package */ + uint32_t cluster_id; + /** Physical package containing the cluster */ + const struct cpuinfo_package* package; + /** CPU microarchitecture vendor of the cores in the cluster */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture of the cores in the cluster */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register of the cores in the cluster */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) of the cores in the cluster */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */ + uint64_t frequency; +}; + +#define CPUINFO_PACKAGE_NAME_MAX 48 + +struct cpuinfo_package { + /** SoC or processor chip model name */ + char name[CPUINFO_PACKAGE_NAME_MAX]; + /** Index of the first logical processor on this physical package */ + uint32_t processor_start; + /** Number of logical processors on this physical package */ + uint32_t processor_count; + /** Index of the first core on this physical package */ + uint32_t core_start; + /** Number of cores on this physical package */ + uint32_t core_count; + /** Index of the first cluster of cores on this physical package */ + uint32_t cluster_start; + /** Number of clusters of cores on this physical package */ + uint32_t cluster_count; +}; + +struct cpuinfo_uarch_info { + /** Type of CPU microarchitecture */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for the microarchitecture */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for the microarchitecture */ + uint32_t midr; +#endif + /** 
Number of logical processors with the microarchitecture */ + uint32_t processor_count; + /** Number of cores with the microarchitecture */ + uint32_t core_count; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +bool CPUINFO_ABI cpuinfo_initialize(void); + +void CPUINFO_ABI cpuinfo_deinitialize(void); + +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. */ + struct cpuinfo_x86_isa { + #if CPUINFO_ARCH_X86 + bool rdtsc; + #endif + bool rdtscp; + bool rdpid; + bool sysenter; + #if CPUINFO_ARCH_X86 + bool syscall; + #endif + bool msr; + bool clzero; + bool clflush; + bool clflushopt; + bool mwait; + bool mwaitx; + #if CPUINFO_ARCH_X86 + bool emmx; + #endif + bool fxsave; + bool xsave; + #if CPUINFO_ARCH_X86 + bool fpu; + bool mmx; + bool mmx_plus; + #endif + bool three_d_now; + bool three_d_now_plus; + #if CPUINFO_ARCH_X86 + bool three_d_now_geode; + #endif + bool prefetch; + bool prefetchw; + bool prefetchwt1; + #if CPUINFO_ARCH_X86 + bool daz; + bool sse; + bool sse2; + #endif + bool sse3; + bool ssse3; + bool sse4_1; + bool sse4_2; + bool sse4a; + bool misaligned_sse; + bool avx; + bool avxvnni; + bool fma3; + bool fma4; + bool xop; + bool f16c; + bool avx2; + bool avx512f; + bool avx512pf; + bool avx512er; + bool avx512cd; + bool avx512dq; + bool avx512bw; + bool avx512vl; + bool avx512ifma; + bool avx512vbmi; + bool avx512vbmi2; + bool avx512bitalg; + bool avx512vpopcntdq; + bool avx512vnni; + bool avx512bf16; + bool avx512fp16; + bool avx512vp2intersect; + bool avx512_4vnniw; + bool avx512_4fmaps; + bool hle; + bool rtm; + bool xtest; + bool mpx; + #if CPUINFO_ARCH_X86 + bool cmov; + bool cmpxchg8b; + #endif + bool cmpxchg16b; + bool clwb; + bool movbe; + #if CPUINFO_ARCH_X86_64 + bool lahf_sahf; + #endif + bool fs_gs_base; + bool lzcnt; + bool popcnt; + bool tbm; + bool bmi; + bool bmi2; + bool adx; + bool aes; + bool vaes; + bool pclmulqdq; + bool vpclmulqdq; + bool gfni; + bool rdrand; + bool rdseed; + bool sha; + bool rng; + bool ace; + bool ace2; + bool phe; + bool pmm; + bool lwp; + }; + + extern struct cpuinfo_x86_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_x86_rdtsc(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.rdtsc; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdtscp(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdtscp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdpid(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdpid; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clzero(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clzero; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwait(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwait; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwaitx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwaitx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fxsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fxsave; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xsave; + #else + return false; + #endif +} + 
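+/*
+ * Editor's note: the block below is an illustrative usage sketch, not part of the
+ * upstream cpuinfo header. It only uses APIs declared in this file plus <stdio.h>,
+ * assumes the cpuinfo library is linked, and is wrapped in #if 0 so it has no
+ * effect on builds.
+ */
+#if 0
+#include <stdio.h>
+
+static void print_cpu_summary(void) {
+	/* cpuinfo_initialize() must succeed before any query or cpuinfo_has_*() call. */
+	if (!cpuinfo_initialize()) {
+		fprintf(stderr, "failed to initialize cpuinfo\n");
+		return;
+	}
+	/* Enumerate physical cores via cpuinfo_get_cores_count()/cpuinfo_get_core(). */
+	for (uint32_t i = 0; i < cpuinfo_get_cores_count(); i++) {
+		const struct cpuinfo_core* core = cpuinfo_get_core(i);
+		printf("core %u: uarch 0x%08X, %u logical processor(s)\n",
+			core->core_id, (unsigned) core->uarch, core->processor_count);
+	}
+	/* Feature helpers are safe on any architecture; they return false elsewhere. */
+	printf("AVX2: %s\n", cpuinfo_has_x86_avx2() ? "yes" : "no");
+	cpuinfo_deinitialize();
+}
+#endif
+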
+static inline bool cpuinfo_has_x86_fpu(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.fpu; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx_plus(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx_plus; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_plus(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now_plus; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_geode(void) { + #if CPUINFO_ARCH_X86_64 + return false; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return false; + #else + return cpuinfo_isa.three_d_now_geode; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetch(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetch; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchwt1(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchwt1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_daz(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.daz; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse2(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse2; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_ssse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.ssse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_1(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_1; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_2(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_2; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4a(void) 
{ + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sse4a; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_misaligned_sse(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.misaligned_sse; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avxvnni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avxvnni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fma3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fma4(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma4; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xop(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xop; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_f16c(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.f16c; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512f(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512f; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512pf(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512pf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512er(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512er; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512cd(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512cd; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512dq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512dq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vl(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vl; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512ifma(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512ifma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bitalg(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bitalg; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vpopcntdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vnni(void) { + #if CPUINFO_ARCH_X86 || 
CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vnni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bf16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512fp16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vp2intersect(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vp2intersect; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512_4vnniw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4vnniw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512_4fmaps(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4fmaps; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_hle(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.hle; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rtm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rtm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xtest(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xtest; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mpx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mpx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmov(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmov; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg8b(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmpxchg8b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg16b(void) { + #if CPUINFO_ARCH_X86_64 + return cpuinfo_isa.cmpxchg16b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clwb(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clwb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_movbe(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.movbe; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lahf_sahf(void) { + #if CPUINFO_ARCH_X86 + return true; + #elif CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lahf_sahf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lzcnt(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lzcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_popcnt(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.popcnt; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.popcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_tbm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.tbm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi2; + #else + return false; + #endif +} + +static 
inline bool cpuinfo_has_x86_adx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.adx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_aes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vaes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vaes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_pclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.pclmulqdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vpclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vpclmulqdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_gfni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.gfni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdrand(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdrand; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdseed(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdseed; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sha(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sha; + #else + return false; + #endif +} + +#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /* This structure is not a part of stable API. Use cpuinfo_has_arm_* functions instead. */ + struct cpuinfo_arm_isa { + #if CPUINFO_ARCH_ARM + bool thumb; + bool thumb2; + bool thumbee; + bool jazelle; + bool armv5e; + bool armv6; + bool armv6k; + bool armv7; + bool armv7mp; + bool armv8; + bool idiv; + + bool vfpv2; + bool vfpv3; + bool d32; + bool fp16; + bool fma; + + bool wmmx; + bool wmmx2; + bool neon; + #endif + #if CPUINFO_ARCH_ARM64 + bool atomics; + bool bf16; + bool sve; + bool sve2; + bool i8mm; + #endif + bool rdm; + bool fp16arith; + bool dot; + bool jscvt; + bool fcma; + bool fhm; + + bool aes; + bool sha1; + bool sha2; + bool pmull; + bool crc32; + }; + + extern struct cpuinfo_arm_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_arm_thumb(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_thumb2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v5e(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv5e; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6k(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6k; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7mp(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7mp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_idiv(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.idiv; + #else + return false; + #endif 
+} + +static inline bool cpuinfo_has_arm_vfpv2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fp16_arith(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fma(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_atomics(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.atomics; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_rdm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.rdm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16_arith(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16arith; + #elif CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fhm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fhm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_dot(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 
+ return cpuinfo_isa.dot; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_jscvt(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.jscvt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fcma(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fcma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_i8mm(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.i8mm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_aes(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha1(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha2(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_pmull(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.pmull; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_crc32(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.crc32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve && cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve2(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve2; + #else + return false; + #endif +} + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void); + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index); + +uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void); +uint32_t CPUINFO_ABI 
cpuinfo_get_clusters_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void); + +/** + * Returns upper bound on cache size. + */ +uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void); + +/** + * Identify the logical processor that executes the current thread. + * + * There is no guarantee that the thread will stay on the same logical processor for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void); + +/** + * Identify the core that executes the current thread. + * + * There is no guarantee that the thread will stay on the same core for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns 0. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns the user-specified default value. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* CPUINFO_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl.h b/venv/lib/python3.10/site-packages/torch/include/dnnl.h new file mode 100644 index 0000000000000000000000000000000000000000..bc74bf644f4b628018d7a9103ba63320abc466d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/ + +#ifndef DNNL_H +#define DNNL_H + +#include "oneapi/dnnl/dnnl.h" + +#endif /* DNNL_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_config.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_config.h new file mode 100644 index 0000000000000000000000000000000000000000..48925e1e3ab49ae135c6e9c4c501aa2f5e030913 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_config.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_CONFIG_H +#define DNNL_CONFIG_H + +#include "oneapi/dnnl/dnnl_config.h" + +#endif /* DNNL_CONFIG_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_debug.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..5044971832bbbe56127920a527508b207a803eea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_debug.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_DEBUG_H +#define DNNL_DEBUG_H + +#include "oneapi/dnnl/dnnl_debug.h" + +#endif /* DNNL_DEBUG_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_ocl.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_ocl.h new file mode 100644 index 0000000000000000000000000000000000000000..ad731150b28babe7bd5a911acd8de70c57e85254 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_ocl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_OCL_H +#define DNNL_OCL_H + +#include "oneapi/dnnl/dnnl_ocl.h" + +#endif /* DNNL_OCL_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_sycl.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_sycl.h new file mode 100644 index 0000000000000000000000000000000000000000..4501598c2f461021f0fa818e95fd1972ce2d3ace --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_sycl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_SYCL_H +#define DNNL_SYCL_H + +#include "oneapi/dnnl/dnnl_sycl.h" + +#endif /* DNNL_SYCL_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..a4a854a4cf138103f4c53030083e119cc0732cf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_SYCL_TYPES_H +#define DNNL_SYCL_TYPES_H + +#include "oneapi/dnnl/dnnl_sycl_types.h" + +#endif /* DNNL_SYCL_TYPES_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..e27e584a65ed16740d4fde93da3a1a049dd111aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_THREADPOOL_H +#define DNNL_THREADPOOL_H + +#include "oneapi/dnnl/dnnl_threadpool.h" + +#endif /* DNNL_THREADPOOL_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_types.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..6f4261b712dc37ec2416ba60c0c68bb30f6995e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_types.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_TYPES_H +#define DNNL_TYPES_H + +#include "oneapi/dnnl/dnnl_types.h" + +#endif /* DNNL_TYPES_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/dnnl_version.h b/venv/lib/python3.10/site-packages/torch/include/dnnl_version.h new file mode 100644 index 0000000000000000000000000000000000000000..32a3d5cf839b1d593f069520febfd60b323730e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/dnnl_version.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/ + +#ifndef DNNL_VERSION_H +#define DNNL_VERSION_H + +#include "oneapi/dnnl/dnnl_version.h" + +#endif /* DNNL_VERSION_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/experiments-config.h b/venv/lib/python3.10/site-packages/torch/include/experiments-config.h new file mode 100644 index 0000000000000000000000000000000000000000..7c0cba4acdaef0784e7b96bfd6e755254d3eecb4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/experiments-config.h @@ -0,0 +1,25 @@ +// Copyright 2023 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include <stdbool.h> + +#ifdef __cplusplus +extern "C" { +#endif + +struct xnn_experiment_config { + bool adaptive_avx_optimization; +}; + +struct xnn_experiment_config* xnn_get_experiment_config(); + +void xnn_experiment_enable_adaptive_avx_optimization(); + + +#ifdef __cplusplus +} // extern "C" +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/fp16.h b/venv/lib/python3.10/site-packages/torch/include/fp16.h new file mode 100644 index 0000000000000000000000000000000000000000..9d7366e997dadef17922225bcbb489288f6f9cdc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/fp16.h @@ -0,0 +1,11 @@ +#pragma once +#ifndef FP16_H +#define FP16_H + +#include <fp16/fp16.h> + +#if defined(PSIMD_H) +#include <fp16/psimd.h> +#endif + +#endif /* FP16_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/fxdiv.h b/venv/lib/python3.10/site-packages/torch/include/fxdiv.h new file mode 100644 index 0000000000000000000000000000000000000000..2c35038d97c55c524bb97caba2e3560cab9da504 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/fxdiv.h @@ -0,0 +1,425 @@ +#pragma once +#ifndef FXDIV_H +#define FXDIV_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include <cstddef> + #include <cstdint> + #include <climits> +#elif !defined(__OPENCL_VERSION__) + #include <stddef.h> + #include <stdint.h> + #include <limits.h> +#endif + +#if defined(_MSC_VER) + #include <intrin.h> + #if defined(_M_IX86) || defined(_M_X64) + #include <immintrin.h> + #endif +#endif + +#ifndef FXDIV_USE_INLINE_ASSEMBLY + #define FXDIV_USE_INLINE_ASSEMBLY 0 +#endif + +static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) { +#if defined(_MSC_VER) && defined(_M_IX86) + return (uint64_t) __emulu((unsigned int) a, (unsigned int) b); +#else + return (uint64_t) a * (uint64_t) b; +#endif +} + +static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b); +#elif defined(_MSC_VER) && defined(_M_IX86) + return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32); +#elif defined(_MSC_VER) && defined(_M_ARM) + return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b); +#else + return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32); +#endif +} + +static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b); +#elif defined(_MSC_VER) && defined(_M_X64) + return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b); +#elif defined(__GNUC__) && defined(__SIZEOF_INT128__) + return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64); +#else + const uint32_t a_lo = (uint32_t)
a; + const uint32_t a_hi = (uint32_t) (a >> 32); + const uint32_t b_lo = (uint32_t) b; + const uint32_t b_hi = (uint32_t) (b >> 32); + + const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) + + (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo); + return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) + + ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32); +#endif +} + +static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) { +#if SIZE_MAX == UINT32_MAX + return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b); +#elif SIZE_MAX == UINT64_MAX + return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b); +#else + #error Unsupported platform +#endif +} + +struct fxdiv_divisor_uint32_t { + uint32_t value; + uint32_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint32_t { + uint32_t quotient; + uint32_t remainder; +}; + +struct fxdiv_divisor_uint64_t { + uint64_t value; + uint64_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint64_t { + uint64_t quotient; + uint64_t remainder; +}; + +struct fxdiv_divisor_size_t { + size_t value; + size_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_size_t { + size_t quotient; + size_t remainder; +}; + +static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) { + struct fxdiv_divisor_uint32_t result = { d }; + if (d == 1) { + result.m = UINT32_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t l_minus_1 = 31 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t l_minus_1 = 31 - __clz((int) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse(&l_minus_1, (unsigned long) (d - 1)); + #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t l_minus_1; + __asm__("BSRL %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1); + #else + /* Based on Algorithm 2 from Hacker's delight */ + + uint32_t l_minus_1 = 0; + uint32_t x = d - 1; + uint32_t y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + #endif + uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */ + #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t q; + __asm__("DIVL %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (0) + : "cc"); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64)) + unsigned int remainder; + const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder); + #else + const uint32_t q = ((uint64_t) u_hi << 32) / d; + #endif + + result.m = q + UINT32_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) { + struct fxdiv_divisor_uint64_t result = { d }; + if (d == 1) { + result.m = UINT64_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if 
defined(__OPENCL_VERSION__) + const uint32_t nlz_d = clz(d); + const uint32_t l_minus_1 = 63 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t nlz_d = __clzll((long long) d); + const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1)); + unsigned long bsr_d; + _BitScanReverse64(&bsr_d, (unsigned __int64) d); + const uint32_t nlz_d = bsr_d ^ 0x3F; + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM)) + const uint64_t d_minus_1 = d - 1; + const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0; + unsigned long l_minus_1; + if ((uint32_t) (d_minus_1 >> 32) == 0) { + _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1); + } else { + _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32)); + l_minus_1 += 32; + } + const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2; + #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t l_minus_1; + __asm__("BSRQ %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1); + const uint32_t nlz_d = __builtin_clzll(d); + #else + /* Based on Algorithm 2 from Hacker's delight */ + const uint64_t d_minus_1 = d - 1; + const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0; + uint32_t l_minus_1 = 0; + uint32_t x = (uint32_t) d_minus_1; + uint32_t y = d_minus_1 >> 32; + if (y != 0) { + l_minus_1 += 32; + x = y; + } + y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2; + #endif + uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */ + #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t q; + __asm__("DIVQ %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (UINT64_C(0)) + : "cc"); + #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__) + /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */ + const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d)); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64) + unsigned __int64 remainder; + const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder); + #else + /* Implementation based on code from Hacker's delight */ + + /* Normalize divisor and shift divident left */ + d <<= nlz_d; + u_hi <<= nlz_d; + /* Break divisor up into two 32-bit digits */ + const uint64_t d_hi = (uint32_t) (d >> 32); + const uint32_t d_lo = (uint32_t) d; + + /* Compute the first quotient digit, q1 */ + uint64_t q1 = u_hi / d_hi; + uint64_t r1 = u_hi - q1 * d_hi; + + while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) { + q1 -= 1; + r1 += d_hi; + if ((r1 >> 32) != 0) { + break; + } + } + + /* Multiply and subtract. 
*/ + u_hi = (u_hi << 32) - q1 * d; + + /* Compute the second quotient digit, q0 */ + uint64_t q0 = u_hi / d_hi; + uint64_t r0 = u_hi - q0 * d_hi; + + while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) { + q0 -= 1; + r0 += d_hi; + if ((r0 >> 32) != 0) { + break; + } + } + const uint64_t q = (q1 << 32) | (uint32_t) q0; + #endif + result.m = q + UINT64_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d); +#else + #error Unsupported platform +#endif + struct fxdiv_divisor_size_t size_result = { + (size_t) uint_result.value, + (size_t) uint_result.m, + uint_result.s1, + uint_result.s2 + }; + return size_result; +} + +static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint32_divisor = { + (uint32_t) divisor.value, + (uint32_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint64_divisor = { + (uint64_t) divisor.value, + (uint64_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor); +#else + #error Unsupported platform +#endif +} + +static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity); + return quotient * granularity.value; +} + +static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity); + return quotient * granularity.value; +} + +static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) { + const size_t quotient = fxdiv_quotient_size_t(n, granularity); + return quotient * granularity.value; +} + +static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + const 
uint32_t remainder = n - quotient * divisor.value; + struct fxdiv_result_uint32_t result = { quotient, remainder }; + return result; +} + +static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + const uint64_t remainder = n - quotient * divisor.value; + struct fxdiv_result_uint64_t result = { quotient, remainder }; + return result; +} + +static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + const size_t remainder = n - quotient * divisor.value; + struct fxdiv_result_size_t result = { quotient, remainder }; + return result; +} + +#endif /* FXDIV_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/libshm.h b/venv/lib/python3.10/site-packages/torch/include/libshm.h new file mode 100644 index 0000000000000000000000000000000000000000..28024aa2338d1f46ce280abeb92a633f89be1385 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/libshm.h @@ -0,0 +1,46 @@ +#pragma once + +#include + +#ifdef __cplusplus + +void libshm_init(const char* manager_exec_path); + +// Superclass to run a constructor before at::RefcountedMapAllocator +class THManagedMapAllocatorInit { + protected: + THManagedMapAllocatorInit(const char* manager_handle, const char* filename); + std::string manager_handle_; +}; + +// Like a at::RefcountedMapAllocator, but it also makes use of an external +// shared memory manager process to ensure that shared memory regions actually +// get freed in the end (even if processes lose the memory). +class THManagedMapAllocator : private THManagedMapAllocatorInit, + public at::RefcountedMapAllocator { + public: + THManagedMapAllocator( + const char* manager_handle, + const char* filename, + int flags, + size_t size); + + void close() override; + + ~THManagedMapAllocator() override { + close(); + } + + static at::DataPtr makeDataPtr( + const char* manager_handle, + const char* filename, + int flags, + size_t size); + static THManagedMapAllocator* fromDataPtr(const at::DataPtr&); + + const char* manager_handle() const { + return manager_handle_.c_str(); + } +}; + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/nnpack.h b/venv/lib/python3.10/site-packages/torch/include/nnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..97b5ff390076e9ab7ae91e67bfc0d78736aaeffd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/nnpack.h @@ -0,0 +1,659 @@ +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Status code for any NNPACK function call. + */ +enum nnp_status { + /** The call succeeded, and all output arguments now contain valid data. */ + nnp_status_success = 0, + /** NNPACK function was called with batch_size == 0. */ + nnp_status_invalid_batch_size = 2, + /** NNPACK function was called with channels == 0. */ + nnp_status_invalid_channels = 3, + /** NNPACK function was called with input_channels == 0. */ + nnp_status_invalid_input_channels = 4, + /** NNPACK function was called with output_channels == 0. 
*/ + nnp_status_invalid_output_channels = 5, + /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */ + nnp_status_invalid_input_size = 10, + /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */ + nnp_status_invalid_input_stride = 11, + /** NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.: + * + * - input_padding.left >= kernel_size.width (>= pooling_size.width) + * - input_padding.right >= kernel_size.width (>= pooling_size.width) + * - input_padding.top >= kernel_size.height (>= pooling_size.height) + * - input_padding.bottom >= kernel_size.height (>= pooling_size.height) + */ + nnp_status_invalid_input_padding = 12, + /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */ + nnp_status_invalid_kernel_size = 13, + /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */ + nnp_status_invalid_pooling_size = 14, + /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */ + nnp_status_invalid_pooling_stride = 15, + /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */ + nnp_status_invalid_algorithm = 16, + /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */ + nnp_status_invalid_transform_strategy = 17, + /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */ + nnp_status_invalid_output_subsampling = 13, + /** NNPACK function was called with activation not in nnp_activation enum */ + nnp_status_invalid_activation = 14, + /** NNPACK function was called with invalid activation parameters */ + nnp_status_invalid_activation_parameters = 15, + + /** NNPACK does not support the particular input size for the function */ + nnp_status_unsupported_input_size = 20, + /** NNPACK does not support the particular input stride for the function */ + nnp_status_unsupported_input_stride = 21, + /** NNPACK does not support the particular input padding for the function */ + nnp_status_unsupported_input_padding = 22, + /** NNPACK does not support the particular kernel size for the function */ + nnp_status_unsupported_kernel_size = 23, + /** NNPACK does not support the particular pooling size for the function */ + nnp_status_unsupported_pooling_size = 24, + /** NNPACK does not support the particular pooling stride for the function */ + nnp_status_unsupported_pooling_stride = 25, + /** NNPACK does not support the particular convolution algorithm for the function */ + nnp_status_unsupported_algorithm = 26, + /** NNPACK does not support the particular convolution transform strategy for the algorithm */ + nnp_status_unsupported_transform_strategy = 27, + /** NNPACK does not support the particular activation function for the function */ + nnp_status_unsupported_activation = 28, + /** NNPACK does not support the particular activation function parameters for the function */ + nnp_status_unsupported_activation_parameters = 29, + + /** NNPACK function was called before the library was initialized */ + nnp_status_uninitialized = 50, + /** NNPACK does not implement this function for the host CPU */ + nnp_status_unsupported_hardware = 51, + /** NNPACK failed to allocate memory for temporary buffers */ + nnp_status_out_of_memory = 52, + /** Scratch space buffer is too small */ + nnp_status_insufficient_buffer = 53, + /** Scratch space buffer 
is not properly aligned */ + nnp_status_misaligned_buffer = 54 +}; + +/** + * @brief Activation applied applied after a convolutional or fully-connected layer. + */ +enum nnp_activation { + /** Identity activation f(x) := x, i.e. no transformation */ + nnp_activation_identity = 0, + /** ReLU activation f(x) := max(0, x) */ + nnp_activation_relu = 1, +}; + +/** + * @brief Algorithm for computing convolutional layers. + */ +enum nnp_convolution_algorithm { + /** Let NNPACK choose the algorithm depending on layer parameters */ + nnp_convolution_algorithm_auto = 0, + /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */ + nnp_convolution_algorithm_ft8x8 = 1, + /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */ + nnp_convolution_algorithm_ft16x16 = 2, + /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */ + nnp_convolution_algorithm_wt8x8 = 3, + /** Direct convolution via implicit GEMM. */ + nnp_convolution_algorithm_implicit_gemm = 4, + /** Direct convolution implementation. */ + nnp_convolution_algorithm_direct = 5, + /** + * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16. + * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP), + * on non-supported processors falls back to nnp_convolution_algorithm_wt8x8. + */ + nnp_convolution_algorithm_wt8x8_fp16 = 6, +}; + +enum nnp_convolution_transform_strategy { + nnp_convolution_transform_strategy_compute = 1, + nnp_convolution_transform_strategy_precompute = 2, + nnp_convolution_transform_strategy_reuse = 3 +}; + +/* For backward compatibility */ +#define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute +#define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute + +/** + * @brief Size of images, kernels, and pooling filters in NNPACK. + */ +struct nnp_size { + /** Width (horizontal size) of an image, kernel, or pooling filter. */ + size_t width; + /** Height (vertical size) of an image, kernel, or pooling filter. */ + size_t height; +}; + +/** + * @brief Padding of images in NNPACK. + */ +struct nnp_padding { + /** Padding above the image data */ + size_t top; + /** Padding on the right of image data */ + size_t right; + /** Padding below the image data */ + size_t bottom; + /** Padding on the left of image data */ + size_t left; +}; + +/** + * @brief Profiling information about time spent in different phases of a function call. + */ +struct nnp_profile { + /** Time spent inside the function call, in seconds. */ + double total; + /** Time spend on transformation of the input or input gradient tensor, in seconds. */ + double input_transform; + /** Time spend on transformation of the kernel or kernel gradient tensor, in seconds. */ + double kernel_transform; + /** Time spend on transformation of the output or output gradient tensor, in seconds. */ + double output_transform; + /** Time spend on multiplication-accumulation of transformed coefficients, in seconds. */ + double block_multiplication; +}; + +enum nnp_status nnp_initialize(void); + +enum nnp_status nnp_deinitialize(void); + +/** + * @brief Computes output of a 2D convolutional layer from input and kernel tensors. + * @details This function targets training of convolutional neural networks and performs forward propagation. 
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_convolution_inference for optimal performance. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images. + * @param input_size Size of input images, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. + * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ + +enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. 
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients). + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* grad_output, + const float* kernel, + float* grad_input, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. 
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[out] grad_kernel A 4D tensor + * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* grad_output, + float* grad_kernel, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param transform_strategy A strategy that guides computation of kernel transforms coefficients. + * Possible values are: + * + * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed + * coefficients. + * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed + * coefficients. + * + * @param input_channels The number of channels (AKA features, dimensions) in the input image. + * @param output_channels The number of channels (AKA features, dimensions) in the output image. + * @param input_size Size of input image, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input image. + * @param kernel_size Kernel size. + * @param output_subsampling Subsample region for output, also known as convolution stride. + * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. 
+ * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes. + * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size + * of required workspace memory at the workspace_size location, and exit without + * computations. + * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory + * before and deallocate after this computation, potentially at significant runtime cost. + * @param[in,out] workspace_size Pointer to the size of workspace buffer. + * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to + * the location specified by this pointer. + * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of + * the buffer, in bytes. + * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK + * would allocate memory before and deallocate after this computation, potentially at + * significant runtime cost. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a fully connected layer from input and kernel matrices. + * @details This function targets training of convolutional neural networks and performs forward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_fully_connected_inference for optimal performance. + * @param batch_size The number of vectors on the input and output of the fully connected layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input matrix. + * @param output_channels The number of channels (AKA features, dimensions) in the output matrix. + * @param[in] input A 2D matrix input[batch_size][input_channels]. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels]. + * @param[out] output A 2D matrix output[batch_size][output_channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. 
+ */ +enum nnp_status nnp_fully_connected_output( + size_t batch_size, + size_t input_channels, + size_t output_channels, + const float input[], + const float kernel[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param input_channels The number of channels (AKA features, dimensions) in the input vector. + * @param output_channels The number of channels (AKA features, dimensions) in the output vector. + * @param[in] input A 1D array input[input_channels] of FP32 elements. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements. + * @param[out] output A 1D array output[output_channels] of FP32 elements. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_fully_connected_inference( + size_t input_channels, + size_t output_channels, + const float* input, + const float* kernel, + float* output, + pthreadpool_t threadpool); + +/** + * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param input_channels The number of channels (AKA features, dimensions) in the input vector. + * @param output_channels The number of channels (AKA features, dimensions) in the output vector. + * @param[in] input A 1D array input[input_channels] of FP32 elements. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements. + * @param[out] output A 1D array output[output_channels] of FP32 elements. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_fully_connected_inference_f16f32( + size_t input_channels, + size_t output_channels, + const float* input, + const void* kernel, + float* output, + pthreadpool_t threadpool); + +/** + * @brief Computes output of a max-pooling layer for an input tensor. + * @details This function targets both prediction and training of convolutional neural networks and performs forward + * propagation. Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of images on the input and output of the max-pooling layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output images. + * @param input_size Size of input images, excluding implicit zero-padding. + * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but + * affect the output size. + * @param pooling_size Size of the pooling filter. Only 2x2 filter are currently supported. + * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported. + * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width]. 
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where + * output_size.height = ceil( + * (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) / + * pooling_stride.height) + 1 + * output_size.width = ceil( + * (input_padding.left + input_size.width + input_padding.right - pooling_size.width) / + * pooling_stride.width) + 1 + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_max_pooling_output( + size_t batch_size, + size_t channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size pooling_size, + struct nnp_size pooling_stride, + const float input[], + float output[], + pthreadpool_t threadpool); + +/** + * @brief Computes output of a softmax layer for an input matrix. + * @details This function targets both prediction and training of convolutional neural networks and performs forward + * propagation. Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of vectors on the input and output of the softmax layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output vectors. + * @param[in] input A 2D matrix input[batch_size][channels]. + * @param[out] output A 2D matrix output[batch_size][channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_softmax_output( + size_t batch_size, + size_t channels, + const float input[], + float output[], + pthreadpool_t threadpool); + +/** + * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix. + * @details This function targets both prediction and training of convolutional neural networks and performs forward + * propagation. Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of vectors on the input and output of the ReLU layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output matrices. + * @param[in] input A 2D matrix input[batch_size][channels]. + * @param[out] output A 2D matrix output[batch_size][channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_relu_output( + size_t batch_size, + size_t channels, + const float input[], + float output[], + float negative_slope, + pthreadpool_t threadpool); + +/** + * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of vectors on the input and output of the ReLU layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output matrices. + * @param[in] input A 2D matrix input[batch_size][channels]. + * @param[out] output A 2D matrix output[batch_size][channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. 
+ */ +enum nnp_status nnp_relu_input_gradient( + size_t batch_size, + size_t channels, + const float grad_output[], + const float input[], + float grad_input[], + float negative_slope, + pthreadpool_t threadpool); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#ifdef __cplusplus +// Backward compatible implementations for nnp_convolution_*, if we are in C++ +// mode. +inline enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_output( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, kernel, bias, output, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float grad_output[], + const float kernel[], + float grad_input[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_input_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + grad_output, kernel, grad_input, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float grad_output[], + float grad_kernel[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_kernel_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, grad_output, grad_kernel, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) { + return nnp_convolution_inference( + algorithm, transform_strategy, + input_channels, output_channels, + input_size, input_padding, kernel_size, output_subsampling, + input, kernel, bias, output, NULL, NULL, + nnp_activation_identity, NULL, + threadpool, profile); +} + +#endif // __cplusplus diff --git a/venv/lib/python3.10/site-packages/torch/include/psimd.h b/venv/lib/python3.10/site-packages/torch/include/psimd.h new file mode 100644 index 0000000000000000000000000000000000000000..b7cb65d799c98931a73b3184511b1bd8c2b30ec0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/psimd.h @@ -0,0 +1,1384 @@ +#pragma once +#ifndef PSIMD_H +#define PSIMD_H + +#if defined(__CUDA_ARCH__) + /* CUDA 
compiler */ + #define PSIMD_INTRINSIC __forceinline__ __device__ +#elif defined(__OPENCL_VERSION__) + /* OpenCL compiler */ + #define PSIMD_INTRINSIC inline static +#elif defined(__INTEL_COMPILER) + /* Intel compiler, even on Windows */ + #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__)) +#elif defined(__GNUC__) + /* GCC-compatible compiler (gcc/clang/icc) */ + #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__)) +#elif defined(_MSC_VER) + /* MSVC-compatible compiler (cl/icl/clang-cl) */ + #define PSIMD_INTRINSIC __forceinline static +#elif defined(__cplusplus) + /* Generic C++ compiler */ + #define PSIMD_INTRINSIC inline static +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + /* Generic C99 compiler */ + #define PSIMD_INTRINSIC inline static +#else + /* Generic C compiler */ + #define PSIMD_INTRINSIC static +#endif + +#if defined(__GNUC__) || defined(__clang__) + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + #include + #endif + + #if defined(__SSE2__) + #include + #endif + + #if defined(__SSE3__) + #include + #endif + + #if defined(__SSSE3__) + #include + #endif + + #if defined(__SSE4_1__) + #include + #endif + + #if defined(__SSE4_2__) + #include + #endif + + #if defined(__AVX__) + #include + #endif +#elif defined(_MSC_VER) + #include +#endif + +#if defined(__cplusplus) + #define PSIMD_CXX_SYNTAX +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) + #define PSIMD_C11_SYNTAX +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + #define PSIMD_C99_SYNTAX +#else + #define PSIMD_C89_SYNTAX +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define PSIMD_HAVE_F64 0 + #define PSIMD_HAVE_F32 1 + #define PSIMD_HAVE_U8 1 + #define PSIMD_HAVE_S8 1 + #define PSIMD_HAVE_U16 1 + #define PSIMD_HAVE_S16 1 + #define PSIMD_HAVE_U32 1 + #define PSIMD_HAVE_S32 1 + #define PSIMD_HAVE_U64 0 + #define PSIMD_HAVE_S64 0 + + typedef int8_t psimd_s8 __attribute__((vector_size(16), aligned(1))); + typedef uint8_t psimd_u8 __attribute__((vector_size(16), aligned(1))); + typedef int16_t psimd_s16 __attribute__((vector_size(16), aligned(2))); + typedef uint16_t psimd_u16 __attribute__((vector_size(16), aligned(2))); + typedef int32_t psimd_s32 __attribute__((vector_size(16), aligned(4))); + typedef uint32_t psimd_u32 __attribute__((vector_size(16), aligned(4))); + typedef float psimd_f32 __attribute__((vector_size(16), aligned(4))); + + typedef struct { + psimd_s8 lo; + psimd_s8 hi; + } psimd_s8x2; + + typedef struct { + psimd_u8 lo; + psimd_u8 hi; + } psimd_u8x2; + + typedef struct { + psimd_s16 lo; + psimd_s16 hi; + } psimd_s16x2; + + typedef struct { + psimd_u16 lo; + psimd_u16 hi; + } psimd_u16x2; + + typedef struct { + psimd_s32 lo; + psimd_s32 hi; + } psimd_s32x2; + + typedef struct { + psimd_u32 lo; + psimd_u32 hi; + } psimd_u32x2; + + typedef struct { + psimd_f32 lo; + psimd_f32 hi; + } psimd_f32x2; + + /* Bit casts */ + PSIMD_INTRINSIC psimd_u32x2 psimd_cast_s32x2_u32x2(psimd_s32x2 v) { + return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi }; + } + + PSIMD_INTRINSIC psimd_f32x2 psimd_cast_s32x2_f32x2(psimd_s32x2 v) { + return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi }; + } + + PSIMD_INTRINSIC psimd_s32x2 psimd_cast_u32x2_s32x2(psimd_u32x2 v) { + return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi }; + } + + 
PSIMD_INTRINSIC psimd_f32x2 psimd_cast_u32x2_f32x2(psimd_u32x2 v) { + return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi }; + } + + PSIMD_INTRINSIC psimd_s32x2 psimd_cast_f32x2_s32x2(psimd_f32x2 v) { + return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi }; + } + + PSIMD_INTRINSIC psimd_u32x2 psimd_cast_f32x2_u32x2(psimd_f32x2 v) { + return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi }; + } + + /* Swap */ + PSIMD_INTRINSIC void psimd_swap_s8(psimd_s8 a[1], psimd_s8 b[1]) { + const psimd_s8 new_a = *b; + const psimd_s8 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u8(psimd_u8 a[1], psimd_u8 b[1]) { + const psimd_u8 new_a = *b; + const psimd_u8 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_s16(psimd_s16 a[1], psimd_s16 b[1]) { + const psimd_s16 new_a = *b; + const psimd_s16 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u16(psimd_u16 a[1], psimd_u16 b[1]) { + const psimd_u16 new_a = *b; + const psimd_u16 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_s32(psimd_s32 a[1], psimd_s32 b[1]) { + const psimd_s32 new_a = *b; + const psimd_s32 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u32(psimd_u32 a[1], psimd_u32 b[1]) { + const psimd_u32 new_a = *b; + const psimd_u32 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_f32(psimd_f32 a[1], psimd_f32 b[1]) { + const psimd_f32 new_a = *b; + const psimd_f32 new_b = *a; + *a = new_a; + *b = new_b; + } + + /* Zero-initialization */ + PSIMD_INTRINSIC psimd_s8 psimd_zero_s8(void) { + return (psimd_s8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u8 psimd_zero_u8(void) { + return (psimd_u8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_s16 psimd_zero_s16(void) { + return (psimd_s16) { 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u16 psimd_zero_u16(void) { + return (psimd_u16) { 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_zero_s32(void) { + return (psimd_s32) { 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_zero_u32(void) { + return (psimd_u32) { 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_zero_f32(void) { + return (psimd_f32) { 0.0f, 0.0f, 0.0f, 0.0f }; + } + + /* Initialization to the same constant */ + PSIMD_INTRINSIC psimd_s8 psimd_splat_s8(int8_t c) { + return (psimd_s8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u8 psimd_splat_u8(uint8_t c) { + return (psimd_u8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_s16 psimd_splat_s16(int16_t c) { + return (psimd_s16) { c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u16 psimd_splat_u16(uint16_t c) { + return (psimd_u16) { c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_splat_s32(int32_t c) { + return (psimd_s32) { c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_splat_u32(uint32_t c) { + return (psimd_u32) { c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat_f32(float c) { + return (psimd_f32) { c, c, c, c }; + } + + /* Load vector */ + PSIMD_INTRINSIC psimd_s8 psimd_load_s8(const void* address) { + return *((const psimd_s8*) address); + } + + PSIMD_INTRINSIC psimd_u8 psimd_load_u8(const void* address) { + return *((const psimd_u8*) address); + } + + PSIMD_INTRINSIC psimd_s16 psimd_load_s16(const void* address) { + return 
*((const psimd_s16*) address); + } + + PSIMD_INTRINSIC psimd_u16 psimd_load_u16(const void* address) { + return *((const psimd_u16*) address); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load_s32(const void* address) { + return *((const psimd_s32*) address); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load_u32(const void* address) { + return *((const psimd_u32*) address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_f32(const void* address) { + return *((const psimd_f32*) address); + } + + PSIMD_INTRINSIC psimd_s8 psimd_load_splat_s8(const void* address) { + return psimd_splat_s8(*((const int8_t*) address)); + } + + PSIMD_INTRINSIC psimd_u8 psimd_load_splat_u8(const void* address) { + return psimd_splat_u8(*((const uint8_t*) address)); + } + + PSIMD_INTRINSIC psimd_s16 psimd_load_splat_s16(const void* address) { + return psimd_splat_s16(*((const int16_t*) address)); + } + + PSIMD_INTRINSIC psimd_u16 psimd_load_splat_u16(const void* address) { + return psimd_splat_u16(*((const uint16_t*) address)); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load_splat_s32(const void* address) { + return psimd_splat_s32(*((const int32_t*) address)); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load_splat_u32(const void* address) { + return psimd_splat_u32(*((const uint32_t*) address)); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_splat_f32(const void* address) { + return psimd_splat_f32(*((const float*) address)); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load1_s32(const void* address) { + return (psimd_s32) { *((const int32_t*) address), 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load1_u32(const void* address) { + return (psimd_u32) { *((const uint32_t*) address), 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_f32(const void* address) { + return (psimd_f32) { *((const float*) address), 0.0f, 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load2_s32(const void* address) { + const int32_t* address_s32 = (const int32_t*) address; + return (psimd_s32) { address_s32[0], address_s32[1], 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load2_u32(const void* address) { + const uint32_t* address_u32 = (const uint32_t*) address; + return (psimd_u32) { address_u32[0], address_u32[1], 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[1], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load3_s32(const void* address) { + const int32_t* address_s32 = (const int32_t*) address; + return (psimd_s32) { address_s32[0], address_s32[1], address_s32[2], 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load3_u32(const void* address) { + const uint32_t* address_u32 = (const uint32_t*) address; + return (psimd_u32) { address_u32[0], address_u32[1], address_u32[2], 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[1], address_f32[2], 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load4_s32(const void* address) { + return psimd_load_s32(address); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load4_u32(const void* address) { + return psimd_load_u32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_f32(const void* address) { + return psimd_load_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_stride2_f32(const void* address) { + const psimd_f32 v0x1x = psimd_load_f32(address); + const psimd_f32 vx2x3 = psimd_load_f32((const float*) address + 3); + #if 
defined(__clang__) + return __builtin_shufflevector(v0x1x, vx2x3, 0, 2, 5, 7); + #else + return __builtin_shuffle(v0x1x, vx2x3, (psimd_s32) { 0, 2, 5, 7 }); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_stride2_f32(const void* address) { + return psimd_load_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_stride2_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[2], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_stride2_f32(const void* address) { + const psimd_f32 v0x1x = psimd_load_f32(address); + const psimd_f32 v2zzz = psimd_load1_f32((const float*) address + 2); + #if defined(__clang__) + return __builtin_shufflevector(v0x1x, v2zzz, 0, 2, 4, 6); + #else + return __builtin_shuffle(v0x1x, v2zzz, (psimd_s32) { 0, 2, 4, 6 }); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_stride2_f32(const void* address) { + return psimd_load_stride2_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_stride_f32(const void* address, size_t stride) { + const float* address0_f32 = (const float*) address; + const float* address1_f32 = address0_f32 + stride; + const float* address2_f32 = address1_f32 + stride; + const float* address3_f32 = address2_f32 + stride; + return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, *address3_f32 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_stride_f32(const void* address, size_t stride) { + return psimd_load1_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_stride_f32(const void* address, size_t stride) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[stride], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_stride_f32(const void* address, size_t stride) { + const float* address0_f32 = (const float*) address; + const float* address1_f32 = address0_f32 + stride; + const float* address2_f32 = address1_f32 + stride; + return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_stride_f32(const void* address, size_t stride) { + return psimd_load_stride_f32(address, stride); + } + + /* Store vector */ + PSIMD_INTRINSIC void psimd_store_s8(void* address, psimd_s8 value) { + *((psimd_s8*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u8(void* address, psimd_u8 value) { + *((psimd_u8*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_s16(void* address, psimd_s16 value) { + *((psimd_s16*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u16(void* address, psimd_u16 value) { + *((psimd_u16*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_s32(void* address, psimd_s32 value) { + *((psimd_s32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u32(void* address, psimd_u32 value) { + *((psimd_u32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_f32(void* address, psimd_f32 value) { + *((psimd_f32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store1_s32(void* address, psimd_s32 value) { + *((int32_t*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store1_u32(void* address, psimd_u32 value) { + *((uint32_t*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store1_f32(void* address, psimd_f32 value) { + *((float*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store2_s32(void* address, psimd_s32 value) { + int32_t* address_s32 = (int32_t*) address; + address_s32[0] = value[0]; + address_s32[1] = value[1]; + } + + 
PSIMD_INTRINSIC void psimd_store2_u32(void* address, psimd_u32 value) { + uint32_t* address_u32 = (uint32_t*) address; + address_u32[0] = value[0]; + address_u32[1] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store2_f32(void* address, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[1] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store3_s32(void* address, psimd_s32 value) { + int32_t* address_s32 = (int32_t*) address; + address_s32[0] = value[0]; + address_s32[1] = value[1]; + address_s32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store3_u32(void* address, psimd_u32 value) { + uint32_t* address_u32 = (uint32_t*) address; + address_u32[0] = value[0]; + address_u32[1] = value[1]; + address_u32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store3_f32(void* address, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[1] = value[1]; + address_f32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store4_s32(void* address, psimd_s32 value) { + psimd_store_s32(address, value); + } + + PSIMD_INTRINSIC void psimd_store4_u32(void* address, psimd_u32 value) { + psimd_store_u32(address, value); + } + + PSIMD_INTRINSIC void psimd_store4_f32(void* address, psimd_f32 value) { + psimd_store_f32(address, value); + } + + PSIMD_INTRINSIC void psimd_store_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address0_f32 = (float*) address; + float* address1_f32 = address0_f32 + stride; + float* address2_f32 = address1_f32 + stride; + float* address3_f32 = address2_f32 + stride; + *address0_f32 = value[0]; + *address1_f32 = value[1]; + *address2_f32 = value[2]; + *address3_f32 = value[3]; + } + + PSIMD_INTRINSIC void psimd_store1_stride_f32(void* address, size_t stride, psimd_f32 value) { + psimd_store1_f32(address, value); + } + + PSIMD_INTRINSIC void psimd_store2_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[stride] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store3_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address0_f32 = (float*) address; + float* address1_f32 = address0_f32 + stride; + float* address2_f32 = address1_f32 + stride; + *address0_f32 = value[0]; + *address1_f32 = value[1]; + *address2_f32 = value[2]; + } + + /* Vector addition */ + PSIMD_INTRINSIC psimd_s8 psimd_add_s8(psimd_s8 a, psimd_s8 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_add_u8(psimd_u8 a, psimd_u8 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_add_s16(psimd_s16 a, psimd_s16 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u16 psimd_add_u16(psimd_u16 a, psimd_u16 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_add_s32(psimd_s32 a, psimd_s32 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_add_u32(psimd_u32 a, psimd_u32 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_add_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vaddq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a + b; + #endif + } + + /* Vector subtraction */ + PSIMD_INTRINSIC psimd_s8 psimd_sub_s8(psimd_s8 a, psimd_s8 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_sub_u8(psimd_u8 a, psimd_u8 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_sub_s16(psimd_s16 a, psimd_s16 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u16 
psimd_sub_u16(psimd_u16 a, psimd_u16 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_sub_s32(psimd_s32 a, psimd_s32 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_sub_u32(psimd_u32 a, psimd_u32 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_sub_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vsubq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a - b; + #endif + } + + /* Vector multiplication */ + PSIMD_INTRINSIC psimd_s8 psimd_mul_s8(psimd_s8 a, psimd_s8 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_mul_u8(psimd_u8 a, psimd_u8 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_mul_s16(psimd_s16 a, psimd_s16 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u16 psimd_mul_u16(psimd_u16 a, psimd_u16 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_mul_s32(psimd_s32 a, psimd_s32 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_mul_u32(psimd_u32 a, psimd_u32 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_mul_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vmulq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a * b; + #endif + } + + /* Quasi-Fused Multiply-Add */ + PSIMD_INTRINSIC psimd_f32 psimd_qfma_f32(psimd_f32 a, psimd_f32 b, psimd_f32 c) { + #if defined(__aarch64__) || defined(__ARM_NEON__) && defined(__ARM_FEATURE_FMA) + return (psimd_f32) vfmaq_f32((float32x4_t) a, (float32x4_t) b, (float32x4_t) c); + #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA__) + return (psimd_f32) _mm_fmadd_ps((__m128) b, (__m128) c, (__m128) a); + #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA4__) + return (psimd_f32) _mm_macc_ps((__m128) b, (__m128) c, (__m128) a); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) && PSIMD_ENABLE_WASM_QFMA + return (psimd_f32) __builtin_wasm_qfma_f32x4(a, b, c); + #else + return a + b * c; + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_div_f32(psimd_f32 a, psimd_f32 b) { + return a / b; + } + + /* Vector and */ + PSIMD_INTRINSIC psimd_f32 psimd_andmask_f32(psimd_s32 mask, psimd_f32 v) { + return (psimd_f32) (mask & (psimd_s32) v); + } + + /* Vector and-not */ + PSIMD_INTRINSIC psimd_f32 psimd_andnotmask_f32(psimd_s32 mask, psimd_f32 v) { + return (psimd_f32) (~mask & (psimd_s32) v); + } + + /* Vector blend */ + PSIMD_INTRINSIC psimd_s8 psimd_blend_s8(psimd_s8 mask, psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vbslq_s8((uint8x16_t) mask, (int8x16_t) a, (int8x16_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s8) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_blend_u8(psimd_s8 mask, psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vbslq_u8((uint8x16_t) mask, (uint8x16_t) a, (uint8x16_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u8) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u8) ((mask & (psimd_s8) a) | (~mask & (psimd_s8) b)); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_blend_s16(psimd_s16 mask, psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + 
return (psimd_s16) vbslq_s16((uint16x8_t) mask, (int16x8_t) a, (int16x8_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s16) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_blend_u16(psimd_s16 mask, psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vbslq_u16((uint16x8_t) mask, (uint16x8_t) a, (uint16x8_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u16) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u16) ((mask & (psimd_s16) a) | (~mask & (psimd_s16) b)); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_blend_s32(psimd_s32 mask, psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vbslq_s32((uint32x4_t) mask, (int32x4_t) a, (int32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s32) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_blend_u32(psimd_s32 mask, psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vbslq_u32((uint32x4_t) mask, (uint32x4_t) a, (uint32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u32) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b)); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_blend_f32(psimd_s32 mask, psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vbslq_f32((uint32x4_t) mask, (float32x4_t) a, (float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_f32) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_f32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b)); + #endif + } + + /* Vector blend on sign */ + PSIMD_INTRINSIC psimd_s8 psimd_signblend_s8(psimd_s8 x, psimd_s8 a, psimd_s8 b) { + return psimd_blend_s8(x >> psimd_splat_s8(7), a, b); + } + + PSIMD_INTRINSIC psimd_u8 psimd_signblend_u8(psimd_s8 x, psimd_u8 a, psimd_u8 b) { + return psimd_blend_u8((x >> psimd_splat_s8(7)), a, b); + } + + PSIMD_INTRINSIC psimd_s16 psimd_signblend_s16(psimd_s16 x, psimd_s16 a, psimd_s16 b) { + return psimd_blend_s16(x >> psimd_splat_s16(15), a, b); + } + + PSIMD_INTRINSIC psimd_u16 psimd_signblend_u16(psimd_s16 x, psimd_u16 a, psimd_u16 b) { + return psimd_blend_u16((x >> psimd_splat_s16(15)), a, b); + } + + PSIMD_INTRINSIC psimd_s32 psimd_signblend_s32(psimd_s32 x, psimd_s32 a, psimd_s32 b) { + return psimd_blend_s32(x >> psimd_splat_s32(31), a, b); + } + + PSIMD_INTRINSIC psimd_u32 psimd_signblend_u32(psimd_s32 x, psimd_u32 a, psimd_u32 b) { + return psimd_blend_u32((x >> psimd_splat_s32(31)), a, b); + } + + PSIMD_INTRINSIC psimd_f32 psimd_signblend_f32(psimd_f32 x, psimd_f32 a, psimd_f32 b) { + const psimd_s32 mask = (psimd_s32) x >> psimd_splat_s32(31); + return psimd_blend_f32(mask, a, b); + } + + /* Vector absolute value */ + PSIMD_INTRINSIC psimd_f32 psimd_abs_f32(psimd_f32 v) { + const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f); + return (psimd_f32) ((psimd_s32) v & ~mask); + } + + /* Vector negation */ + PSIMD_INTRINSIC psimd_f32 psimd_neg_f32(psimd_f32 v) { + const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f); + return (psimd_f32) 
((psimd_s32) v ^ mask); + } + + /* Vector maximum */ + PSIMD_INTRINSIC psimd_s8 psimd_max_s8(psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vmaxq_s8((int8x16_t) a, (int8x16_t) b); + #else + return psimd_blend_s8(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_max_u8(psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b); + #else + return psimd_blend_u8(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_max_s16(psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s16) vmaxq_s16((int16x8_t) a, (int16x8_t) b); + #else + return psimd_blend_s16(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_max_u16(psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vmaxq_u16((uint16x8_t) a, (uint16x8_t) b); + #else + return psimd_blend_u16(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_max_s32(psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b); + #else + return psimd_blend_s32(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_max_u32(psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vmaxq_u32((uint32x4_t) a, (uint32x4_t) b); + #else + return psimd_blend_u32(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_max_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vmaxq_f32((float32x4_t) a, (float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return __builtin_wasm_max_f32x4(a, b); + #else + return psimd_blend_f32(a > b, a, b); + #endif + } + + /* Vector minimum */ + PSIMD_INTRINSIC psimd_s8 psimd_min_s8(psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vminq_s8((int8x16_t) a, (int8x16_t) b); + #else + return psimd_blend_s8(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_min_u8(psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vminq_u8((uint8x16_t) a, (uint8x16_t) b); + #else + return psimd_blend_u8(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_min_s16(psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s16) vminq_s16((int16x8_t) a, (int16x8_t) b); + #else + return psimd_blend_s16(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_min_u16(psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vminq_u16((uint16x8_t) a, (uint16x8_t) b); + #else + return psimd_blend_u16(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_min_s32(psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b); + #else + return psimd_blend_s32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_min_u32(psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vminq_u32((uint32x4_t) a, (uint32x4_t) b); + #else + return psimd_blend_u32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_min_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vminq_f32((float32x4_t) a, (float32x4_t) b); 
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return __builtin_wasm_min_f32x4(a, b); + #else + return psimd_blend_f32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_cvt_s32_f32(psimd_s32 v) { + #if defined(__clang__) + return __builtin_convertvector(v, psimd_f32); + #elif defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vcvtq_f32_s32((int32x4_t) v); + #elif defined(__SSE2__) + return (psimd_f32) _mm_cvtepi32_ps((__m128i) v); + #else + return (psimd_f32) { (float) v[0], (float) v[1], (float) v[2], (float) v[3] }; + #endif + } + + /* Broadcast vector element */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 0, 0, 0, 0); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 1, 1, 1, 1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 2, 2, 2, 2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 3, 3, 3, 3); + } + #else + PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 0, 0, 0, 0 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 1, 1, 1, 1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 2, 2, 2, 2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 3, 3, 3 }); + } + #endif + + /* Reversal of vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) { + return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) { + return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) { + return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) { + return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + #else + PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) { + return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) { + return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) { + return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) { + return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_f32 
psimd_reverse_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + #endif + + /* Interleaving of vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + } + + PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + #endif + + /* Concatenation of low/high vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 
b) { + return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + #endif + + /* Concatenation of even/odd vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shufflevector(a, b, + 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14); + } + + PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shufflevector(a, b, + 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shufflevector(a, b, + 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) { + return 
__builtin_shufflevector(a, b, + 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 }); + } + + PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_u32 
psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + #endif + + /* Vector reduce */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, 0, 1); + return temp + __builtin_shufflevector(temp, temp, 1, 0, 3, 2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1)); + return psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2)); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1)); + return psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2)); + } + + PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, -1, -1); + const psimd_f32 result = temp + __builtin_shufflevector(temp, temp, 1, -1, -1, -1); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1)); + const psimd_f32 result = psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1)); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1)); + const psimd_f32 result = psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1)); + return result[0]; + } + #else + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }); + return temp + __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 })); + return psimd_max_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 })); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 })); + return psimd_min_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 })); + } + + PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_sum_f32(v); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_max_f32(v); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_min_f32(v); + return result[0]; + } + #endif +#endif + +#endif /* PSIMD_H */ diff --git a/venv/lib/python3.10/site-packages/torch/include/pthreadpool.h b/venv/lib/python3.10/site-packages/torch/include/pthreadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..953ccc4cc24070aa4897fabc081cba466e34170a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/pthreadpool.h @@ -0,0 +1,2555 @@ +#ifndef PTHREADPOOL_H_ +#define PTHREADPOOL_H_ + +#include 
<stddef.h> +#include <stdint.h> + +typedef struct pthreadpool* pthreadpool_t; + +typedef void (*pthreadpool_task_1d_t)(void*, size_t); +typedef void (*pthreadpool_task_1d_with_thread_t)(void*, size_t, size_t); +typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t); +typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t); +typedef void (*pthreadpool_task_2d_with_thread_t)(void*, size_t, size_t, size_t); +typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t); +typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_with_thread_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_6d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_6d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t); + +typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t); +typedef void (*pthreadpool_task_2d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t); + +typedef void (*pthreadpool_task_2d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t); + + +/** + * Disable support for denormalized numbers to the maximum extent possible for + * the duration of the computation. + * + * Handling denormalized floating-point numbers is often implemented in + * microcode, and incurs significant performance degradation. This hint + * instructs the thread pool to disable support for denormalized numbers before + * running the computation by manipulating architecture-specific control + * registers, and restore the initial value of control registers after the + * computation is complete. The thread pool temporarily disables denormalized + * numbers on all threads involved in the computation (i.e. the caller threads, + * and potentially worker threads). + * + * Disabling denormalized numbers may have a small negative effect on results' + * accuracy.
As various architectures differ in capabilities to control + * processing of denormalized numbers, using this flag may also hurt results' + * reproducibility across different instruction set architectures. + */ +#define PTHREADPOOL_FLAG_DISABLE_DENORMALS 0x00000001 + +/** + * Yield worker threads to the system scheduler after the operation is finished. + * + * Force workers to use kernel wait (instead of active spin-wait by default) for + * new commands after this command is processed. This flag affects only the + * immediate next operation on this thread pool. To make the thread pool always + * use kernel wait, pass this flag to all parallelization functions. + */ +#define PTHREADPOOL_FLAG_YIELD_WORKERS 0x00000002 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Create a thread pool with the specified number of threads. + * + * @param threads_count the number of threads in the thread pool. + * A value of 0 has special interpretation: it creates a thread pool with as + * many threads as there are logical processors in the system. + * + * @returns A pointer to an opaque thread pool object if the call is + * successful, or NULL pointer if the call failed. + */ +pthreadpool_t pthreadpool_create(size_t threads_count); + +/** + * Query the number of threads in a thread pool. + * + * @param threadpool the thread pool to query. + * + * @returns The number of threads in the thread pool. + */ +size_t pthreadpool_get_threads_count(pthreadpool_t threadpool); + +/** + * Process items on a 1D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * function(context, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. The + * specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d( + pthreadpool_t threadpool, + pthreadpool_task_1d_t function, + void* context, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * function(context, thread_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. The + * specified function will be called once for each item. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_thread_t function, + void* context, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range; i++) + * function(context, uarch_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range the number of items on the 1D grid to process. + * The specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid with specified maximum tile size. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i += tile) + * function(context, i, min(range - i, tile)); + * + * When the call returns, all items have been processed and the thread pool is + * ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, + * the calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. + * @param tile the maximum number of items on the 1D grid to process in + * one function call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_1d_tile_1d_t function, + void* context, + size_t range, + size_t tile, + uint32_t flags); + +/** + * Process items on a 2D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * function(context, i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d( + pthreadpool_t threadpool, + pthreadpool_task_2d_t function, + void* context, + size_t range_i, + size_t range_j, + uint32_t flags); + +/** + * Process items on a 2D grid passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * function(context, thread_index, i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_2d_with_thread_t function, + void* context, + size_t range_i, + size_t range_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function and passing + * along the current thread id. 
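As a rough orientation for the entry points declared above, the sketch below walks through the basic lifecycle: create a pool, run a 1D parallel loop, and tear the pool down. It is an illustrative example, not part of the vendored header: the callback, context struct, and include path are made up, and pthreadpool_destroy is assumed to be the matching cleanup call declared further down in this header.

#include <stddef.h>
#include "pthreadpool.h"  /* include path is illustrative */

/* Illustrative context: scale a float array in place. */
struct scale_context {
  float* data;
  float factor;
};

/* Matches pthreadpool_task_1d_t: (context, item index). */
static void scale_item(void* context, size_t i) {
  struct scale_context* ctx = (struct scale_context*) context;
  ctx->data[i] *= ctx->factor;
}

void scale_array(float* data, size_t length, float factor) {
  /* threads_count == 0 creates one thread per logical processor. */
  pthreadpool_t pool = pthreadpool_create(0);
  struct scale_context ctx = { data, factor };
  pthreadpool_parallelize_1d(
      pool, scale_item, &ctx,
      /*range=*/length,
      /*flags=*/0);
  /* Assumed cleanup call, declared further down in this header. */
  pthreadpool_destroy(pool);
}

Passing 0 to pthreadpool_create sizes the pool to the number of logical processors, and a NULL pool degrades to a serial loop on the calling thread, so the same call site works in single-threaded builds.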
+ * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, thread_index, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d_with_uarch_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_with_id_with_thread_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along each + * grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i += tile_i) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, i, j, + * min(range_i - i, tile_i), min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_i the maximum number of items along the first dimension of + * the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along each + * grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i += tile_i) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, i, j, + * min(range_i - i, tile_i), min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, + * cpuinfo initialization failed, or index returned + * by cpuinfo_get_current_uarch_index() exceeds + * the max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected + * by the specified function. If the index returned + * by cpuinfo_get_current_uarch_index() exceeds this + * value, default_uarch_index will be used instead. + * default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 2D grid. + * @param range_j the number of items to process along the second + * dimension of the 2D grid. + * @param tile_i the maximum number of items along the first + * dimension of the 2D grid to process in one function call. + * @param tile_j the maximum number of items along the second + * dimension of the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 3D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * function(context, i, j, k); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized.
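To make the tiled 2D variant above concrete, here is a minimal sketch that clears a row-major matrix in 32x32 blocks. The context struct, function names, include path, and tile sizes are illustrative; only the pthreadpool_parallelize_2d_tile_2d signature and its task type come from this header.

#include <stddef.h>
#include "pthreadpool.h"  /* include path is illustrative */

/* Illustrative context: a row-major matrix and its column count. */
struct matrix_context {
  float* data;
  size_t cols;
};

/* Matches pthreadpool_task_2d_tile_2d_t:
 * (context, start_i, start_j, actual tile_i, actual tile_j).
 * Edge tiles arrive pre-clamped to min(range - start, tile). */
static void clear_block(void* context, size_t i, size_t j,
                        size_t tile_i, size_t tile_j) {
  struct matrix_context* ctx = (struct matrix_context*) context;
  for (size_t ii = 0; ii < tile_i; ii++) {
    for (size_t jj = 0; jj < tile_j; jj++) {
      ctx->data[(i + ii) * ctx->cols + (j + jj)] = 0.0f;
    }
  }
}

void clear_matrix(pthreadpool_t pool, float* data, size_t rows, size_t cols) {
  struct matrix_context ctx = { data, cols };
  pthreadpool_parallelize_2d_tile_2d(
      pool, clear_block, &ctx,
      /*range_i=*/rows, /*range_j=*/cols,
      /*tile_i=*/32, /*tile_j=*/32,
      /*flags=*/0);
}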
+ * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d( + pthreadpool_t threadpool, + pthreadpool_task_3d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension and passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, thread_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. 
+ * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_thread_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function and passing + * along the current thread id. 
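A minimal sketch of the plain 3D tiled variant declared above, assuming a contiguous batch x height x width tensor; the context struct, names, include path, and tile size are illustrative.

#include <stddef.h>
#include "pthreadpool.h"  /* include path is illustrative */

/* Illustrative context: a contiguous batch x height x width tensor. */
struct bias_context {
  float* data;
  size_t height;
  size_t width;
  float bias;
};

/* Matches pthreadpool_task_3d_tile_1d_t: (context, i, j, k, actual tile_k). */
static void add_bias_span(void* context, size_t b, size_t y,
                          size_t x, size_t tile_x) {
  struct bias_context* ctx = (struct bias_context*) context;
  float* row = ctx->data + (b * ctx->height + y) * ctx->width;
  for (size_t xx = x; xx < x + tile_x; xx++) {
    row[xx] += ctx->bias;
  }
}

void add_bias(pthreadpool_t pool, struct bias_context* ctx, size_t batch) {
  pthreadpool_parallelize_3d_tile_1d(
      pool, add_bias_span, ctx,
      /*range_i=*/batch, /*range_j=*/ctx->height, /*range_k=*/ctx->width,
      /*tile_k=*/64, /*flags=*/0);
}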
+ * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, thread_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_uarch_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_id_with_thread_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. 
+ * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_j the maximum number of items along the second + * dimension of the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 4D grid. 
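+ *
+ * A minimal usage sketch (illustrative only; the task function, the NULL
+ * context, and the N/C/H/W extents below are hypothetical):
+ *
+ *   #include <pthreadpool.h>
+ *
+ *   static void process_item(void* context,
+ *                            size_t n, size_t c, size_t h, size_t w) {
+ *     // handle one element of a hypothetical NxCxHxW tensor
+ *   }
+ *
+ *   void run(void) {
+ *     pthreadpool_t pool = pthreadpool_create(0);  // 0 = one thread per processor
+ *     pthreadpool_parallelize_4d(pool, process_item, NULL,
+ *                                1, 3, 224, 224,   // range_i .. range_l
+ *                                0);               // flags
+ *     pthreadpool_destroy(pool);
+ *   }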
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * function(context, i, j, k, l); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d( + pthreadpool_t threadpool, + pthreadpool_task_4d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, i, j, k, l, min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions. 
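+ *
+ * A minimal usage sketch (illustrative only; conv_block, the NULL context,
+ * the extents, and the reuse of a pool created as in the sketches above are
+ * hypothetical):
+ *
+ *   static void conv_block(void* context, size_t i, size_t j,
+ *                          size_t k, size_t l,
+ *                          size_t tile_k, size_t tile_l) {
+ *     // process the tile [k, k + tile_k) x [l, l + tile_l) for indices (i, j)
+ *   }
+ *
+ *   pthreadpool_parallelize_4d_tile_2d(pool, conv_block, NULL,
+ *                                      2, 16, 128, 128,  // range_i .. range_l
+ *                                      32, 32,           // tile_k, tile_l
+ *                                      0);               // flags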
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, uarch_index, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. 
If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 4D grid. + * @param range_j the number of items to process along the second + * dimension of the 4D grid. + * @param range_k the number of items to process along the third + * dimension of the 4D grid. + * @param range_l the number of items to process along the fourth + * dimension of the 4D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth + * dimension of the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 5D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * function(context, i, j, k, l, m); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d( + pthreadpool_t threadpool, + pthreadpool_task_5d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + uint32_t flags); + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last grid dimension. 
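+ *
+ * A minimal usage sketch (illustrative only; fill_span, the NULL context, the
+ * extents, and the previously created pool are hypothetical):
+ *
+ *   static void fill_span(void* context, size_t i, size_t j, size_t k,
+ *                         size_t l, size_t m, size_t tile_m) {
+ *     // process items [m, m + tile_m) at coordinates (i, j, k, l)
+ *   }
+ *
+ *   pthreadpool_parallelize_5d_tile_1d(pool, fill_span, NULL,
+ *                                      2, 3, 4, 8, 1024,  // range_i .. range_m
+ *                                      256,               // tile_m
+ *                                      0);                // flags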
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * function(context, i, j, k, l, m, min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_5d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_m, + uint32_t flags); + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * for (size_t m = 0; m < range_m; m += tile_m) + * function(context, i, j, k, l, m, + * min(range_l - l, tile_l), min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 5D grid to process in one function call. 
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 5D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_5d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_2d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_l,
+ size_t tile_m,
+ uint32_t flags);
+
+/**
+ * Process items on a 6D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * for (size_t n = 0; n < range_n; n++)
+ * function(context, i, j, k, l, m, n);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_6d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ uint32_t flags);
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * for (size_t n = 0; n < range_n; n += tile_n)
+ * function(context, i, j, k, l, m, n, min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags); + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * for (size_t n = 0; n < range_n; n += tile_n) + * function(context, i, j, k, l, m, n, + * min(range_m - m, tile_m), min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 6D grid to process in one function call. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. 
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_6d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_tile_2d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_m,
+ size_t tile_n,
+ uint32_t flags);
+
+/**
+ * Terminates threads in the thread pool and releases associated resources.
+ *
+ * @warning Accessing the thread pool after a call to this function constitutes
+ * undefined behaviour and may cause data corruption.
+ *
+ * @param[in,out] threadpool The thread pool to destroy.
+ */
+void pthreadpool_destroy(pthreadpool_t threadpool);
+
+#ifndef PTHREADPOOL_NO_DEPRECATED_API
+
+/* Legacy API for compatibility with pre-existing users (e.g. NNPACK) */
+#if defined(__GNUC__)
+ #define PTHREADPOOL_DEPRECATED __attribute__((__deprecated__))
+#else
+ #define PTHREADPOOL_DEPRECATED
+#endif
+
+typedef void (*pthreadpool_function_1d_t)(void*, size_t);
+typedef void (*pthreadpool_function_1d_tiled_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_function_2d_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_function_2d_tiled_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_function_3d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_function_4d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+
+void pthreadpool_compute_1d(
+ pthreadpool_t threadpool,
+ pthreadpool_function_1d_t function,
+ void* argument,
+ size_t range) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_1d_tiled(
+ pthreadpool_t threadpool,
+ pthreadpool_function_1d_tiled_t function,
+ void* argument,
+ size_t range,
+ size_t tile) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_function_2d_t function,
+ void* argument,
+ size_t range_i,
+ size_t range_j) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_2d_tiled(
+ pthreadpool_t threadpool,
+ pthreadpool_function_2d_tiled_t function,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_3d_tiled(
+ pthreadpool_t threadpool,
+ pthreadpool_function_3d_tiled_t function,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t tile_i,
+ size_t tile_j,
+ size_t tile_k) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_4d_tiled(
+ pthreadpool_t threadpool,
+ pthreadpool_function_4d_tiled_t function,
+ void* argument,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t tile_i,
+ size_t tile_j,
+ size_t tile_k,
+ size_t tile_l) PTHREADPOOL_DEPRECATED;
+
+#endif /* PTHREADPOOL_NO_DEPRECATED_API */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#ifdef __cplusplus
+
+namespace libpthreadpool {
+namespace detail {
+namespace {
+
+template<class T>
+void call_wrapper_1d(void* arg, size_t i) {
+ (*static_cast<const T*>(arg))(i);
+}
+
+template<class T>
+void call_wrapper_1d_tile_1d(void* arg, size_t range_i, size_t tile_i) {
+ (*static_cast<const T*>(arg))(range_i, tile_i);
+}
+
+template<class T>
+void call_wrapper_2d(void* functor, size_t i, size_t j) {
+ (*static_cast<const T*>(functor))(i, j);
+}
+
+template<class T>
+void call_wrapper_2d_tile_1d(void* functor,
+ size_t i, size_t range_j, size_t tile_j)
+{
+ (*static_cast<const T*>(functor))(i, range_j, tile_j);
+}
+
+template<class T>
+void call_wrapper_2d_tile_2d(void* functor,
+ size_t range_i, size_t range_j,
+ size_t tile_i, size_t tile_j)
+{
+ (*static_cast<const T*>(functor))(range_i, range_j, tile_i, tile_j);
+}
+
+template<class T>
+void call_wrapper_3d(void* functor, size_t i, size_t j, size_t k) {
+ (*static_cast<const T*>(functor))(i, j, k);
+}
+
+template<class T>
+void call_wrapper_3d_tile_1d(void* functor,
+ size_t i, size_t j, size_t range_k,
+ size_t tile_k)
+{
+ (*static_cast<const T*>(functor))(i, j, range_k, tile_k);
+}
+
+template<class T>
+void call_wrapper_3d_tile_2d(void* functor,
+ size_t i, size_t range_j, size_t range_k,
+ size_t tile_j, size_t tile_k)
+{
+ (*static_cast<const T*>(functor))(i, range_j, range_k, tile_j, tile_k);
+}
+
+template<class T>
+void call_wrapper_4d(void* functor, size_t i, size_t j, size_t k, size_t l) {
+ (*static_cast<const T*>(functor))(i, j, k, l);
+}
+
+template<class T>
+void call_wrapper_4d_tile_1d(void* functor,
+ size_t i, size_t j, size_t k, size_t range_l,
+ size_t tile_l)
+{
+ (*static_cast<const T*>(functor))(i, j, k, range_l, tile_l);
+}
+
+template<class T>
+void call_wrapper_4d_tile_2d(void* functor,
+ size_t i, size_t j, size_t range_k, size_t range_l,
+ size_t tile_k, size_t tile_l)
+{
+ (*static_cast<const T*>(functor))(i, j, range_k, range_l, tile_k, tile_l);
+}
+
+template<class T>
+void call_wrapper_5d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m) {
+ (*static_cast<const T*>(functor))(i, j, k, l, m);
+}
+
+template<class T>
+void call_wrapper_5d_tile_1d(void* functor,
+ size_t i, size_t j, size_t k, size_t l, size_t range_m,
+ size_t tile_m)
+{
+ (*static_cast<const T*>(functor))(i, j, k, l, range_m, tile_m);
+}
+
+template<class T>
+void call_wrapper_5d_tile_2d(void* functor,
+ size_t i, size_t j, size_t k, size_t range_l, size_t range_m,
+ size_t tile_l, size_t tile_m)
+{
+ (*static_cast<const T*>(functor))(i, j, k, range_l, range_m, tile_l, tile_m);
+}
+
+template<class T>
+void call_wrapper_6d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ (*static_cast<const T*>(functor))(i, j, k, l, m, n);
+}
+
+template<class T>
+void call_wrapper_6d_tile_1d(void* functor,
+ size_t i, size_t j, size_t k, size_t l, size_t m, size_t range_n,
+ size_t tile_n)
+{
+ (*static_cast<const T*>(functor))(i, j, k, l, m, range_n, tile_n);
+}
+
+template<class T>
+void call_wrapper_6d_tile_2d(void* functor,
+ size_t i, size_t j, size_t k, size_t l, size_t range_m, size_t range_n,
+ size_t tile_m, size_t tile_n)
+{
+ (*static_cast<const T*>(functor))(i, j, k, l, range_m, range_n, tile_m, tile_n);
+}
+
+} /* namespace */
+} /* namespace detail */
+} /* namespace libpthreadpool */
+
+/**
+ * Process items on a 1D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range; i++)
+ * functor(i);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each item.
+ * @param range the number of items on the 1D grid to process. The
+ * specified functor will be called once for each item.
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range, + uint32_t flags = 0) +{ + pthreadpool_parallelize_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_1d, + const_cast(static_cast(&functor)), + range, + flags); +} + +/** + * Process items on a 1D grid with specified maximum tile size. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i += tile) + * functor(i, min(range - i, tile)); + * + * When the call returns, all items have been processed and the thread pool is + * ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, + * the calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range the number of items on the 1D grid to process. + * @param tile the maximum number of items on the 1D grid to process in + * one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_1d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range, + size_t tile, + uint32_t flags = 0) +{ + pthreadpool_parallelize_1d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_1d_tile_1d, + const_cast(static_cast(&functor)), + range, + tile, + flags); +} + +/** + * Process items on a 2D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * functor(i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each item. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + uint32_t flags = 0) +{ + pthreadpool_parallelize_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + flags); +} + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * functor(i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. 
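+ *
+ * A minimal C++ usage sketch (illustrative only; the image buffer, its
+ * dimensions, and the previously created pool are hypothetical; requires
+ * <vector>):
+ *
+ *   const size_t height = 480, width = 640;
+ *   std::vector<float> image(height * width, 1.0f);
+ *   pthreadpool_parallelize_2d_tile_1d(
+ *       pool,
+ *       [&](size_t i, size_t j, size_t tile_j) {
+ *         for (size_t jj = j; jj < j + tile_j; jj++)
+ *           image[i * width + jj] *= 0.5f;  // scale one row segment
+ *       },
+ *       height, width, 64);  // range_i, range_j, tile_j; flags default to 0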
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_2d_tile_1d(
+ pthreadpool_t threadpool,
+ const T& functor,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_j,
+ uint32_t flags = 0)
+{
+ pthreadpool_parallelize_2d_tile_1d(
+ threadpool,
+ &libpthreadpool::detail::call_wrapper_2d_tile_1d<const T>,
+ const_cast<void*>(static_cast<const void*>(&functor)),
+ range_i,
+ range_j,
+ tile_j,
+ flags);
+}
+
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * functor(i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_i the maximum number of items along the first dimension of
+ * the 2D grid to process in one functor call.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_2d_tile_2d(
+ pthreadpool_t threadpool,
+ const T& functor,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags = 0)
+{
+ pthreadpool_parallelize_2d_tile_2d(
+ threadpool,
+ &libpthreadpool::detail::call_wrapper_2d_tile_2d<const T>,
+ const_cast<void*>(static_cast<const void*>(&functor)),
+ range_i,
+ range_j,
+ tile_i,
+ tile_j,
+ flags);
+}
+
+/**
+ * Process items on a 3D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * functor(i, j, k);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation.
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + flags); +} + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * functor(i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + tile_k, + flags); +} + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * functor(i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. 
+ * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 3D grid to process in one functor call. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + tile_j, + tile_k, + flags); +} + +/** + * Process items on a 4D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * functor(i, j, k, l); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + flags); +} + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * functor(i, j, k, l, min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + tile_l, + flags); +} + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * functor(i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 4D grid to process in one functor call. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + tile_k, + tile_l, + flags); +} + +/** + * Process items on a 5D grid. 
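+ *
+ * A minimal C++ usage sketch (illustrative only; the counter, the extents,
+ * and the previously created pool are hypothetical; requires <atomic>):
+ *
+ *   std::atomic<size_t> visited{0};
+ *   pthreadpool_parallelize_5d(
+ *       pool,
+ *       [&](size_t i, size_t j, size_t k, size_t l, size_t m) {
+ *         visited.fetch_add(1, std::memory_order_relaxed);
+ *       },
+ *       2, 3, 4, 5, 6);  // range_i .. range_m; flags default to 0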
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * functor(i, j, k, l, m); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + flags); +} + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * functor(i, j, k, l, m, min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one functor call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + tile_m, + flags); +} + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * for (size_t m = 0; m < range_m; m += tile_m) + * functor(i, j, k, l, m, + * min(range_l - l, tile_l), min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 5D grid to process in one functor call. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_l, + size_t tile_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + tile_l, + tile_m, + flags); +} + +/** + * Process items on a 6D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n++) + * functor(i, j, k, l, m, n); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. 
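+ *
+ * A minimal C++ usage sketch (illustrative only; the extents and the
+ * previously created pool are hypothetical):
+ *
+ *   pthreadpool_parallelize_6d(
+ *       pool,
+ *       [](size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+ *         // touch element (i, j, k, l, m, n) of a hypothetical 6D tensor
+ *       },
+ *       2, 2, 3, 3, 4, 4,                 // range_i .. range_n
+ *       PTHREADPOOL_FLAG_YIELD_WORKERS);  // optional flag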
+ * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_6d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + uint32_t flags = 0) +{ + pthreadpool_parallelize_6d( + threadpool, + &libpthreadpool::detail::call_wrapper_6d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + range_n, + flags); +} + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n += tile_n) + * functor(i, j, k, l, m, n, min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one functor call. 
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_6d_tile_1d(
+ pthreadpool_t threadpool,
+ const T& functor,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_n,
+ uint32_t flags = 0)
+{
+ pthreadpool_parallelize_6d_tile_1d(
+ threadpool,
+ &libpthreadpool::detail::call_wrapper_6d_tile_1d<const T>,
+ const_cast<void*>(static_cast<const void*>(&functor)),
+ range_i,
+ range_j,
+ range_k,
+ range_l,
+ range_m,
+ range_n,
+ tile_n,
+ flags);
+}
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last two grid dimensions.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m += tile_m)
+ * for (size_t n = 0; n < range_n; n += tile_n)
+ * functor(i, j, k, l, m, n,
+ * min(range_m - m, tile_m), min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 6D grid to process in one functor call.
+ * @param tile_n the maximum number of items along the sixth dimension of
+ * the 6D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_6d_tile_2d(
+ pthreadpool_t threadpool,
+ const T& functor,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ size_t tile_m,
+ size_t tile_n,
+ uint32_t flags = 0)
+{
+ pthreadpool_parallelize_6d_tile_2d(
+ threadpool,
+ &libpthreadpool::detail::call_wrapper_6d_tile_2d<const T>,
+ const_cast<void*>(static_cast<const void*>(&functor)),
+ range_i,
+ range_j,
+ range_k,
+ range_l,
+ range_m,
+ range_n,
+ tile_m,
+ tile_n,
+ flags);
+}
+
+#endif /* __cplusplus */
+
+#endif /* PTHREADPOOL_H_ */
diff --git a/venv/lib/python3.10/site-packages/torch/include/qnnpack.h b/venv/lib/python3.10/site-packages/torch/include/qnnpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..591fa68eba5a3c8a6b22c12c4fa6efbefd098b84
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/qnnpack.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <pthreadpool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Status code for any QNNPACK function call.
+ */
+enum qnnp_status {
+ /** The call succeeded, and all output arguments now contain valid data. */
+ qnnp_status_success = 0,
+ qnnp_status_uninitialized = 1,
+ qnnp_status_invalid_parameter = 2,
+ qnnp_status_unsupported_parameter = 3,
+ qnnp_status_unsupported_hardware = 4,
+ qnnp_status_out_of_memory = 5,
+};
+
+enum qnnp_status qnnp_initialize(void);
+
+enum qnnp_status qnnp_deinitialize(void);
+
+typedef struct qnnp_operator* qnnp_operator_t;
+
+enum qnnp_status qnnp_create_convolution2d_nhwc_q8(
+ uint32_t input_padding_top,
+ uint32_t input_padding_right,
+ uint32_t input_padding_bottom,
+ uint32_t input_padding_left,
+ uint32_t kernel_height,
+ uint32_t kernel_width,
+ uint32_t subsampling_height,
+ uint32_t subsampling_width,
+ uint32_t dilation_height,
+ uint32_t dilation_width,
+ uint32_t groups,
+ size_t group_input_channels,
+ size_t group_output_channels,
+ uint8_t input_zero_point,
+ float input_scale,
+ uint8_t kernel_zero_point,
+ float kernel_scale,
+ const uint8_t* kernel,
+ const int32_t* bias,
+ uint8_t output_zero_point,
+ float output_scale,
+ uint8_t output_min,
+ uint8_t output_max,
+ uint32_t flags,
+ qnnp_operator_t* convolution);
+
+enum qnnp_status qnnp_setup_convolution2d_nhwc_q8(
+ qnnp_operator_t convolution,
+ size_t batch_size,
+ size_t input_height,
+ size_t input_width,
+ const uint8_t* input,
+ size_t input_stride,
+ uint8_t* output,
+ size_t output_stride,
+ pthreadpool_t threadpool);
+
+enum qnnp_status qnnp_create_deconvolution2d_nhwc_q8(
+ uint32_t input_padding_top,
+ uint32_t input_padding_right,
+ uint32_t input_padding_bottom,
+ uint32_t input_padding_left,
+ uint32_t adjustment_height,
+ uint32_t adjustment_width,
+ uint32_t kernel_height,
+ uint32_t kernel_width,
+ uint32_t stride_height,
+ uint32_t stride_width,
+ uint32_t dilation_height,
+ uint32_t dilation_width,
+ uint32_t groups,
+ size_t group_input_channels,
+ size_t group_output_channels,
+ uint8_t input_zero_point,
+ float input_scale,
+ uint8_t kernel_zero_point,
+ float kernel_scale,
+ const uint8_t* kernel,
+ const int32_t* bias,
+ uint8_t output_zero_point,
+ float output_scale,
+ uint8_t output_min,
+ uint8_t output_max,
+ uint32_t flags,
+ qnnp_operator_t* deconvolution);
+
+enum qnnp_status qnnp_setup_deconvolution2d_nhwc_q8(
+ qnnp_operator_t deconvolution,
+ size_t batch_size,
+ size_t input_height,
+ size_t input_width,
+ const uint8_t* input,
+ size_t input_stride,
+ uint8_t* output,
+ size_t output_stride,
+ pthreadpool_t threadpool);
+
+enum qnnp_status qnnp_create_fully_connected_nc_q8(
+ size_t input_channels,
+ size_t output_channels,
+ uint8_t input_zero_point,
+ float input_scale,
+ uint8_t kernel_zero_point,
+ float kernel_scale,
+ const uint8_t* kernel,
+ const int32_t* bias,
+ uint8_t output_zero_point,
+ float output_scale,
+ uint8_t output_min,
+ uint8_t output_max,
+ uint32_t flags,
+ qnnp_operator_t* fully_connected);
+
+enum qnnp_status qnnp_setup_fully_connected_nc_q8(
+ qnnp_operator_t fully_connected,
+ size_t batch_size,
+ const uint8_t* input,
+ size_t input_stride,
+ uint8_t* output,
+ size_t output_stride);
+
+enum qnnp_status qnnp_create_global_average_pooling_nwc_q8(
+ size_t channels,
+ uint8_t input_zero_point,
+
float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* global_average_pooling); + +enum qnnp_status qnnp_setup_global_average_pooling_nwc_q8( + qnnp_operator_t global_average_pooling, + size_t batch_size, + size_t width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_average_pooling2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + size_t channels, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* average_pooling); + +enum qnnp_status qnnp_setup_average_pooling2d_nhwc_q8( + qnnp_operator_t average_pooling, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_max_pooling2d_nhwc_u8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* max_pooling); + +enum qnnp_status qnnp_setup_max_pooling2d_nhwc_u8( + qnnp_operator_t max_pooling, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_channel_shuffle_nc_x8( + size_t groups, + size_t group_channels, + uint32_t flags, + qnnp_operator_t* channel_shuffle); + +enum qnnp_status qnnp_setup_channel_shuffle_nc_x8( + qnnp_operator_t channel_shuffle, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_add_nc_q8( + size_t channels, + uint8_t a_zero_point, + float a_scale, + uint8_t b_zero_point, + float b_scale, + uint8_t sum_zero_point, + float sum_scale, + uint8_t sum_min, + uint8_t sum_max, + uint32_t flags, + qnnp_operator_t* add); + +enum qnnp_status qnnp_setup_add_nc_q8( + qnnp_operator_t add, + size_t batch_size, + const uint8_t* a, + size_t a_stride, + const uint8_t* b, + size_t b_stride, + uint8_t* sum, + size_t sum_stride); + +enum qnnp_status qnnp_create_clamp_nc_u8( + size_t channels, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* clamp); + +enum qnnp_status qnnp_setup_clamp_nc_u8( + qnnp_operator_t clamp, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_sigmoid_nc_q8( + size_t channels, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* sigmoid); + +enum qnnp_status qnnp_setup_sigmoid_nc_q8( + qnnp_operator_t sigmoid, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status 
qnnp_create_leaky_relu_nc_q8( + size_t channels, + float negative_slope, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* leaky_relu); + +enum qnnp_status qnnp_setup_leaky_relu_nc_q8( + qnnp_operator_t leaky_relu, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_softargmax_nc_q8( + size_t channels, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + qnnp_operator_t* softargmax); + +enum qnnp_status qnnp_setup_softargmax_nc_q8( + qnnp_operator_t softargmax, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_run_operator( + qnnp_operator_t op, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_delete_operator( + qnnp_operator_t op); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/qnnpack_func.h b/venv/lib/python3.10/site-packages/torch/include/qnnpack_func.h new file mode 100644 index 0000000000000000000000000000000000000000..10bbc000192d7e03745e2cf3fb263a9655cde00c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/qnnpack_func.h @@ -0,0 +1,166 @@ +#pragma once + +#include +#include + +namespace qnnpack { +class PrePackConvWeights final { + public: + PrePackConvWeights( + const pytorch_qnnp_operator_t convolution, + const uint8_t* kernel_zero_points, + const uint8_t* kernel, + const int32_t* bias); + + void* getPackedWeights() const + { + return packed_weights_; + } + + int64_t getOutputChannels() const + { + return output_channels_; + } + + ~PrePackConvWeights() + { + if (packed_weights_ != nullptr) { + free(packed_weights_); + } + } + + PrePackConvWeights() = delete; + PrePackConvWeights(const PrePackConvWeights&) = delete; + PrePackConvWeights& operator=(const PrePackConvWeights&) = delete; + + private: + void* packed_weights_ = nullptr; + int64_t output_channels_; +}; + +class PackBMatrix final { + public: + PackBMatrix( + size_t input_channels, + size_t output_channels, + const uint8_t* kernel_zero_points, + const float* requantization_scale, + const uint8_t* kernel, + const int32_t* bias); + + // This constructor is to be used for dynamic mode + // quantization. In dynamic mode, we dont yet support + // per channel quantization, and paying the cost of + // memory allocation for per channel zero point and + // requant scale will hurt performance. 
+ PackBMatrix( + size_t input_channels, + size_t output_channels, + const uint8_t kernel_zero_point, + const float requantization_scale, + const uint8_t* kernel, + const int32_t* bias); + + void* getPackedWeights() const + { + return packed_weights_; + } + + void unpackWeights( + const uint8_t* kernel_zero_points, + int8_t* kernel + ) const; + + size_t getInputChannels() const + { + return input_channels_; + } + + size_t getOutputChannels() const + { + return output_channels_; + } + + ~PackBMatrix() + { + if (packed_weights_ != nullptr) { + free(packed_weights_); + } + } + + PackBMatrix() = delete; + PackBMatrix(const PackBMatrix&) = delete; + PackBMatrix& operator=(const PackBMatrix&) = delete; + + private: + void* packed_weights_ = nullptr; + size_t input_channels_; + size_t output_channels_; +}; + +enum pytorch_qnnp_status qnnpackLinear( + const size_t batch_size, + const size_t input_channels, + const size_t output_channels, + const uint8_t input_zero_point, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + const uint8_t* input, + const size_t input_stride, + void* packed_weights, + uint8_t* output, + const size_t output_stride, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackConv( + const pytorch_qnnp_operator_t convolution, + void* packed_weights, + const size_t batch_size, + const size_t input_depth, + const size_t input_height, + const size_t input_width, + const uint8_t input_zero_point, + const uint8_t* input, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + uint8_t* output, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackDeConv( + const pytorch_qnnp_operator_t deconvolution, + void* packed_weights, + const size_t batch_size, + const size_t input_height, + const size_t input_width, + const uint8_t input_zero_point, + const uint8_t* input, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + uint8_t* output, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackLinearDynamic( + const size_t batch_size, + const size_t input_channels, + const size_t output_channels, + const uint8_t input_zero_point, + const uint8_t* kernel_zero_points, + const float* dequantization_scales, + const uint8_t* input, + const size_t input_stride, + void* packed_weights, + const float* bias, + float* output, + const size_t output_stride, + pthreadpool_t threadpool); + +} // namespace qnnpack diff --git a/venv/lib/python3.10/site-packages/torch/include/sleef.h b/venv/lib/python3.10/site-packages/torch/include/sleef.h new file mode 100644 index 0000000000000000000000000000000000000000..de36514f991a5f9b4774b232a1a6350c47c2c74c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/sleef.h @@ -0,0 +1,4459 @@ +// Copyright Naoki Shibata and contributors 2010 - 2020. +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef __SLEEF_H__
+#define __SLEEF_H__
+
+#define SLEEF_VERSION_MAJOR 3
+#define SLEEF_VERSION_MINOR 6
+#define SLEEF_VERSION_PATCHLEVEL 0
+
+#include <stddef.h>
+#include <stdint.h>
+
+#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
+#define CONST const
+#else
+#define CONST
+#endif
+
+#if defined(__AVX2__) || defined(__aarch64__) || defined(__arm__) || defined(__powerpc64__) || defined(__zarch__)
+#ifndef FP_FAST_FMA
+#define FP_FAST_FMA
+#endif
+#ifndef FP_FAST_FMAF
+#define FP_FAST_FMAF
+#endif
+#endif
+
+#if defined(_MSC_VER) && !defined(__STDC__)
+#define __STDC__ 1
+#endif
+
+#if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
+#ifdef IMPORT_IS_EXPORT
+#define IMPORT __declspec(dllexport)
+#else // #ifdef IMPORT_IS_EXPORT
+#define IMPORT __declspec(dllimport)
+#if (defined(_MSC_VER))
+#pragma comment(lib,"sleef.lib")
+#endif // #if (defined(_MSC_VER))
+#endif // #ifdef IMPORT_IS_EXPORT
+#else // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
+#define IMPORT
+#endif // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
+
+#if (defined(__GNUC__) || defined(__CLANG__)) && (defined(__i386__) || defined(__x86_64__))
+#include <x86intrin.h>
+#endif
+
+#if (defined(_MSC_VER))
+#include <intrin.h>
+#endif
+
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+#include <arm_neon.h>
+#endif
+
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+#endif
+
+#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
+#include <altivec.h>
+typedef __vector double SLEEF_VECTOR_DOUBLE;
+typedef __vector float SLEEF_VECTOR_FLOAT;
+typedef __vector int SLEEF_VECTOR_INT;
+typedef __vector unsigned int SLEEF_VECTOR_UINT;
+typedef __vector long long SLEEF_VECTOR_LONGLONG;
+typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG;
+#endif
+
+#if defined(__VX__) && defined(__VEC__)
+#ifndef SLEEF_VECINTRIN_H_INCLUDED
+#include <vecintrin.h>
+#define SLEEF_VECINTRIN_H_INCLUDED
+#endif
+typedef __vector double SLEEF_VECTOR_DOUBLE;
+typedef __vector float SLEEF_VECTOR_FLOAT;
+typedef __vector int SLEEF_VECTOR_INT;
+typedef __vector unsigned int SLEEF_VECTOR_UINT;
+typedef __vector long long SLEEF_VECTOR_LONGLONG;
+typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG;
+#endif
+
+//
+
+#ifndef SLEEF_FP_ILOGB0
+#define SLEEF_FP_ILOGB0 ((int)-2147483648)
+#endif
+
+#ifndef SLEEF_FP_ILOGBNAN
+#define SLEEF_FP_ILOGBNAN ((int)2147483647)
+#endif
+
+//
+
+IMPORT void *Sleef_malloc(size_t z);
+IMPORT void Sleef_free(void *ptr);
+IMPORT uint64_t Sleef_currentTimeMicros();
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_MSC_VER)
+IMPORT void Sleef_x86CpuID(int32_t out[4], uint32_t eax, uint32_t ecx);
+#endif
+
+//
+
+#ifndef Sleef_double2_DEFINED
+#define Sleef_double2_DEFINED
+typedef struct {
+ double x, y;
+} Sleef_double2;
+#endif
+
+#ifndef Sleef_float2_DEFINED
+#define Sleef_float2_DEFINED
+typedef struct {
+ float x, y;
+} Sleef_float2;
+#endif
+
+#ifndef Sleef_longdouble2_DEFINED
+#define Sleef_longdouble2_DEFINED
+typedef struct {
+ long double x, y;
+} Sleef_longdouble2;
+#endif
+
+#if !defined(Sleef_quad_DEFINED)
+#define Sleef_quad_DEFINED
+#if defined(__SIZEOF_FLOAT128__) || (defined(__linux__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(__PPC64__) && defined(__GNUC__) &&
!defined(__clang__) && __GNUC__ >= 8) +typedef __float128 Sleef_quad; +#define SLEEF_QUAD_C(x) (x ## Q) +//#elif defined(__SIZEOF_LONG_DOUBLE__) && defined(__aarch64__) +//typedef long double Sleef_quad; +//#define SLEEF_QUAD_C(x) (x ## L) +#else +typedef struct { uint64_t x, y; } Sleef_quad; +#endif +#endif + +#if !defined(Sleef_quad2_DEFINED) +#define Sleef_quad2_DEFINED +typedef union { + struct { + Sleef_quad x, y; + }; + Sleef_quad s[2]; +} Sleef_quad2; +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + +IMPORT CONST double Sleef_sin_u35(double); +IMPORT CONST double Sleef_cos_u35(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u35(double); +IMPORT CONST double Sleef_tan_u35(double); +IMPORT CONST double Sleef_asin_u35(double); +IMPORT CONST double Sleef_acos_u35(double); +IMPORT CONST double Sleef_atan_u35(double); +IMPORT CONST double Sleef_atan2_u35(double, double); +IMPORT CONST double Sleef_log_u35(double); +IMPORT CONST double Sleef_cbrt_u35(double); +IMPORT CONST double Sleef_sin_u10(double); +IMPORT CONST double Sleef_cos_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u10(double); +IMPORT CONST double Sleef_tan_u10(double); +IMPORT CONST double Sleef_asin_u10(double); +IMPORT CONST double Sleef_acos_u10(double); +IMPORT CONST double Sleef_atan_u10(double); +IMPORT CONST double Sleef_atan2_u10(double, double); +IMPORT CONST double Sleef_log_u10(double); +IMPORT CONST double Sleef_cbrt_u10(double); +IMPORT CONST double Sleef_exp_u10(double); +IMPORT CONST double Sleef_pow_u10(double, double); +IMPORT CONST double Sleef_sinh_u10(double); +IMPORT CONST double Sleef_cosh_u10(double); +IMPORT CONST double Sleef_tanh_u10(double); +IMPORT CONST double Sleef_sinh_u35(double); +IMPORT CONST double Sleef_cosh_u35(double); +IMPORT CONST double Sleef_tanh_u35(double); +IMPORT CONST double Sleef_asinh_u10(double); +IMPORT CONST double Sleef_acosh_u10(double); +IMPORT CONST double Sleef_atanh_u10(double); +IMPORT CONST double Sleef_exp2_u10(double); +IMPORT CONST double Sleef_exp10_u10(double); +IMPORT CONST double Sleef_exp2_u35(double); +IMPORT CONST double Sleef_exp10_u35(double); +IMPORT CONST double Sleef_expm1_u10(double); +IMPORT CONST double Sleef_log10_u10(double); +IMPORT CONST double Sleef_log2_u10(double); +IMPORT CONST double Sleef_log2_u35(double); +IMPORT CONST double Sleef_log1p_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u05(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u35(double); +IMPORT CONST double Sleef_sinpi_u05(double); +IMPORT CONST double Sleef_cospi_u05(double); +IMPORT CONST double Sleef_ldexp(double, int); +IMPORT CONST int Sleef_ilogb(double); +IMPORT CONST double Sleef_fma(double, double, double); +IMPORT CONST double Sleef_sqrt(double); +IMPORT CONST double Sleef_sqrt_u05(double); +IMPORT CONST double Sleef_sqrt_u35(double); + +IMPORT CONST double Sleef_hypot_u05(double, double); +IMPORT CONST double Sleef_hypot_u35(double, double); + +IMPORT CONST double Sleef_fabs(double); +IMPORT CONST double Sleef_copysign(double, double); +IMPORT CONST double Sleef_fmax(double, double); +IMPORT CONST double Sleef_fmin(double, double); +IMPORT CONST double Sleef_fdim(double, double); +IMPORT CONST double Sleef_trunc(double); +IMPORT CONST double Sleef_floor(double); +IMPORT CONST double Sleef_ceil(double); +IMPORT CONST double Sleef_round(double); +IMPORT CONST double Sleef_rint(double); +IMPORT CONST double Sleef_nextafter(double, double); +IMPORT CONST double Sleef_frfrexp(double); +IMPORT CONST int Sleef_expfrexp(double); +IMPORT CONST 
double Sleef_fmod(double, double); +IMPORT CONST double Sleef_remainder(double, double); +IMPORT CONST Sleef_double2 Sleef_modf(double); + +IMPORT CONST double Sleef_lgamma_u10(double); +IMPORT CONST double Sleef_tgamma_u10(double); +IMPORT CONST double Sleef_erf_u10(double); +IMPORT CONST double Sleef_erfc_u15(double); + +IMPORT CONST float Sleef_sinf_u35(float); +IMPORT CONST float Sleef_cosf_u35(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u35(float); +IMPORT CONST float Sleef_tanf_u35(float); +IMPORT CONST float Sleef_asinf_u35(float); +IMPORT CONST float Sleef_acosf_u35(float); +IMPORT CONST float Sleef_atanf_u35(float); +IMPORT CONST float Sleef_atan2f_u35(float, float); +IMPORT CONST float Sleef_logf_u35(float); +IMPORT CONST float Sleef_cbrtf_u35(float); +IMPORT CONST float Sleef_sinf_u10(float); +IMPORT CONST float Sleef_cosf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u10(float); +IMPORT CONST float Sleef_fastsinf_u3500(float); +IMPORT CONST float Sleef_fastcosf_u3500(float); +IMPORT CONST float Sleef_tanf_u10(float); +IMPORT CONST float Sleef_asinf_u10(float); +IMPORT CONST float Sleef_acosf_u10(float); +IMPORT CONST float Sleef_atanf_u10(float); +IMPORT CONST float Sleef_atan2f_u10(float, float); +IMPORT CONST float Sleef_logf_u10(float); +IMPORT CONST float Sleef_cbrtf_u10(float); +IMPORT CONST float Sleef_expf_u10(float); +IMPORT CONST float Sleef_powf_u10(float, float); +IMPORT CONST float Sleef_fastpowf_u3500(float, float); +IMPORT CONST float Sleef_sinhf_u10(float); +IMPORT CONST float Sleef_coshf_u10(float); +IMPORT CONST float Sleef_tanhf_u10(float); +IMPORT CONST float Sleef_sinhf_u35(float); +IMPORT CONST float Sleef_coshf_u35(float); +IMPORT CONST float Sleef_tanhf_u35(float); +IMPORT CONST float Sleef_asinhf_u10(float); +IMPORT CONST float Sleef_acoshf_u10(float); +IMPORT CONST float Sleef_atanhf_u10(float); +IMPORT CONST float Sleef_exp2f_u10(float); +IMPORT CONST float Sleef_exp10f_u10(float); +IMPORT CONST float Sleef_exp2f_u35(float); +IMPORT CONST float Sleef_exp10f_u35(float); +IMPORT CONST float Sleef_expm1f_u10(float); +IMPORT CONST float Sleef_log10f_u10(float); +IMPORT CONST float Sleef_log2f_u10(float); +IMPORT CONST float Sleef_log2f_u35(float); +IMPORT CONST float Sleef_log1pf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u05(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u35(float); +IMPORT CONST float Sleef_sinpif_u05(float d); +IMPORT CONST float Sleef_cospif_u05(float d); +IMPORT CONST float Sleef_ldexpf(float, int); +IMPORT CONST int Sleef_ilogbf(float); +IMPORT CONST float Sleef_fmaf(float, float, float); +IMPORT CONST float Sleef_sqrtf(float); +IMPORT CONST float Sleef_sqrtf_u05(float); +IMPORT CONST float Sleef_sqrtf_u35(float); + +IMPORT CONST float Sleef_hypotf_u05(float, float); +IMPORT CONST float Sleef_hypotf_u35(float, float); + +IMPORT CONST float Sleef_fabsf(float); +IMPORT CONST float Sleef_copysignf(float, float); +IMPORT CONST float Sleef_fmaxf(float, float); +IMPORT CONST float Sleef_fminf(float, float); +IMPORT CONST float Sleef_fdimf(float, float); +IMPORT CONST float Sleef_truncf(float); +IMPORT CONST float Sleef_floorf(float); +IMPORT CONST float Sleef_ceilf(float); +IMPORT CONST float Sleef_roundf(float); +IMPORT CONST float Sleef_rintf(float); +IMPORT CONST float Sleef_nextafterf(float, float); +IMPORT CONST float Sleef_frfrexpf(float); +IMPORT CONST int Sleef_expfrexpf(float); +IMPORT CONST float Sleef_fmodf(float, float); +IMPORT CONST float Sleef_remainderf(float, float); +IMPORT CONST Sleef_float2 
Sleef_modff(float); + +IMPORT CONST float Sleef_lgammaf_u10(float); +IMPORT CONST float Sleef_tgammaf_u10(float); +IMPORT CONST float Sleef_erff_u10(float); +IMPORT CONST float Sleef_erfcf_u15(float); + +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u05(long double); +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u35(long double); + +#if defined(Sleef_quad2_DEFINED) +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u05(Sleef_quad); +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u35(Sleef_quad); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35(__m128d); 
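/*
 * Usage sketch (illustrative, not taken from the SLEEF sources): the SSE2
 * block above declares two-lane double-precision entry points such as
 * Sleef_sind2_u10, where the u05/u10/u35 suffixes denote SLEEF's 0.5/1.0/3.5 ULP
 * accuracy tiers. Assuming an SSE2-capable x86 build linked against libsleef,
 * with variable names and inputs chosen only for the example:
 *
 *   #include <emmintrin.h>
 *   #include <sleef.h>
 *
 *   __m128d x = _mm_set_pd(1.0, 0.5);   // lane 0 = 0.5, lane 1 = 1.0
 *   __m128d s = Sleef_sind2_u10(x);     // vectorized sine, 1.0-ULP bound
 *   double out[2];
 *   _mm_storeu_pd(out, s);              // out[0] == sin(0.5), out[1] == sin(1.0)
 */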
+IMPORT CONST __m128d Sleef_cinz_tanhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2(__m128d); +IMPORT CONST __m128d Sleef_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d 
Sleef_cinz_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2(__m128d); +IMPORT CONST __m128d Sleef_floord2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2(__m128d); +IMPORT CONST __m128d Sleef_ceild2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2(__m128d); +IMPORT CONST __m128d Sleef_roundd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2(__m128d); +IMPORT CONST __m128d Sleef_rintd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15(__m128d); +IMPORT CONST int Sleef_getIntd2(int); +IMPORT CONST void *Sleef_getPtrd2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10(__m128); +IMPORT 
CONST __m128 Sleef_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35(__m128); +IMPORT CONST __m128 
Sleef_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4(__m128); +IMPORT CONST __m128 Sleef_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4(__m128); +IMPORT CONST __m128 Sleef_floorf4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4(__m128); +IMPORT CONST __m128 Sleef_ceilf4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4(__m128); +IMPORT CONST __m128 Sleef_roundf4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4(__m128); +IMPORT CONST __m128 Sleef_rintf4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15(__m128); +IMPORT CONST int Sleef_getIntf4(int); +IMPORT CONST int Sleef_cinz_getIntf4(int); +IMPORT CONST void *Sleef_getPtrf4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse2(__m128d); +IMPORT CONST __m128d 
Sleef_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d 
Sleef_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 
Sleef_modfd2_sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse2(__m128d); +IMPORT CONST int Sleef_getIntd2_sse2(int); +IMPORT CONST void *Sleef_getPtrd2_sse2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 
Sleef_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse2(__m128, __m128); +IMPORT 
CONST __m128 Sleef_cinz_fminf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse2(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse2(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse2(__m128); +IMPORT CONST int Sleef_getIntf4_sse2(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse2(int); +IMPORT CONST void *Sleef_getPtrf4_sse2(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse4(__m128d); 
+IMPORT CONST __m128d Sleef_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 
Sleef_cinz_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d 
Sleef_cinz_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse4(__m128d); +IMPORT CONST int Sleef_getIntd2_sse4(int); +IMPORT CONST void *Sleef_getPtrd2_sse4(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse4(__m128); 
+IMPORT CONST __m128 Sleef_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse4(__m128); +IMPORT CONST __m128 
Sleef_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse4(__m128); +IMPORT CONST int Sleef_getIntf4_sse4(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse4(int); +IMPORT CONST void *Sleef_getPtrf4_sse4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d 
Sleef_cinz_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4(__m256d); +IMPORT CONST __m256d Sleef_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4(__m256d); +IMPORT 
CONST __m256d Sleef_cinz_sqrtd4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4(__m256d); +IMPORT CONST __m256d Sleef_floord4(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4(__m256d); +IMPORT CONST __m256d Sleef_ceild4(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4(__m256d); +IMPORT CONST __m256d Sleef_roundd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4(__m256d); +IMPORT CONST __m256d Sleef_rintd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15(__m256d); +IMPORT CONST int Sleef_getIntd4(int); +IMPORT CONST void *Sleef_getPtrd4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35(__m256); +IMPORT CONST 
__m256 Sleef_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35(__m256); +IMPORT CONST __m256 
Sleef_cinz_log2f8_u35(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8(__m256); +IMPORT CONST __m256 Sleef_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8(__m256); +IMPORT CONST __m256 Sleef_floorf8(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8(__m256); +IMPORT CONST __m256 Sleef_ceilf8(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8(__m256); +IMPORT CONST __m256 Sleef_roundf8(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8(__m256); +IMPORT CONST __m256 Sleef_rintf8(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8(__m256); +IMPORT CONST __m256 Sleef_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15(__m256); +IMPORT CONST int Sleef_getIntf8(int); +IMPORT CONST int Sleef_cinz_getIntf8(int); +IMPORT CONST void *Sleef_getPtrf8(int); +IMPORT CONST void *Sleef_cinz_getPtrf8(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx(__m256d); +IMPORT CONST 
__m256d Sleef_cinz_sind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10avx(__m256d); 
+IMPORT CONST __m256d Sleef_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4_avx(__m256d); 
+IMPORT CONST __m256d Sleef_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4_avx(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4_avx(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15avx(__m256d); +IMPORT CONST int Sleef_getIntd4_avx(int); +IMPORT CONST void *Sleef_getPtrd4_avx(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10avx(__m256, __m256); +IMPORT CONST 
__m256 Sleef_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx(__m256); +IMPORT CONST 
__m256 Sleef_cinz_sqrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8_avx(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8_avx(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15avx(__m256); +IMPORT CONST int Sleef_getIntf8_avx(int); +IMPORT CONST int Sleef_cinz_getIntf8_avx(int); +IMPORT CONST void *Sleef_getPtrf8_avx(int); +IMPORT CONST void *Sleef_cinz_getPtrf8_avx(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35fma4(__m256d, 
__m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d 
Sleef_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_fma4(__m256d, __m256d); +IMPORT 
CONST __m256d Sleef_finz_fmodd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15fma4(__m256d); +IMPORT CONST int Sleef_getIntd4_fma4(int); +IMPORT CONST void *Sleef_getPtrd4_fma4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 
Sleef_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 
Sleef_finz_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_fma4(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_fma4(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15fma4(__m256); +IMPORT CONST int Sleef_getIntf8_fma4(int); +IMPORT CONST int Sleef_finz_getIntf8_fma4(int); +IMPORT CONST void *Sleef_getPtrf8_fma4(int); +IMPORT CONST void *Sleef_finz_getPtrf8_fma4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_cosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d 
Sleef_log1pd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15avx2(__m256d); +IMPORT CONST int Sleef_getIntd4_avx2(int); +IMPORT CONST void *Sleef_getPtrd4_avx2(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx2(__m256); +IMPORT 
CONST __m256 Sleef_finz_tanhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx2(__m256); +IMPORT CONST __m256 
Sleef_finz_truncf8_avx2(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_avx2(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_avx2(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15avx2(__m256); +IMPORT CONST int Sleef_getIntf8_avx2(int); +IMPORT CONST int Sleef_finz_getIntf8_avx2(int); +IMPORT CONST void *Sleef_getPtrf8_avx2(int); +IMPORT CONST void *Sleef_finz_getPtrf8_avx2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log1pd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05avx2128(__m128d); +IMPORT CONST 
Sleef___m128d_2 Sleef_finz_sincospid2_u05avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincospid2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128d Sleef_finz_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_modfd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfcd2_u15avx2128(__m128d); +IMPORT CONST int Sleef_getIntd2_avx2128(int); +IMPORT CONST void *Sleef_getPtrd2_avx2128(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 
Sleef_finz_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log1pf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_avx2128(__m128, __m128); 
+IMPORT CONST __m128 Sleef_finz_fmaxf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_remainderf4_avx2128(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_modff4_avx2128(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erfcf4_u15avx2128(__m128); +IMPORT CONST int Sleef_getIntf4_avx2128(int); +IMPORT CONST int Sleef_finz_getIntf4_avx2128(int); +IMPORT CONST void *Sleef_getPtrf4_avx2128(int); +IMPORT CONST void *Sleef_finz_getPtrf4_avx2128(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10(__m512d); +IMPORT CONST __m512d 
Sleef_finz_cosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_finz_sincospid8_u35(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8(__m512d); +IMPORT CONST __m512d Sleef_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8(__m512d); +IMPORT CONST __m512d Sleef_floord8(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8(__m512d); +IMPORT CONST __m512d Sleef_ceild8(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8(__m512d); +IMPORT CONST __m512d Sleef_roundd8(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8(__m512d); +IMPORT CONST __m512d Sleef_rintd8(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15(__m512d); +IMPORT CONST int Sleef_getIntd8(int); +IMPORT CONST void *Sleef_getPtrd8(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35(__m512); +IMPORT CONST __m512 
Sleef_finz_sinf16_u35(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16(__m512); +IMPORT CONST __m512 Sleef_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16(__m512); +IMPORT CONST __m512 Sleef_floorf16(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16(__m512); +IMPORT CONST __m512 Sleef_ceilf16(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16(__m512); +IMPORT CONST __m512 Sleef_roundf16(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16(__m512); +IMPORT CONST __m512 Sleef_rintf16(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16(__m512); +IMPORT CONST __m512 Sleef_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16(__m512); +IMPORT CONST __m512 
Sleef_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15(__m512); +IMPORT CONST int Sleef_getIntf16(int); +IMPORT CONST int Sleef_finz_getIntf16(int); +IMPORT CONST void *Sleef_getPtrf16(int); +IMPORT CONST void *Sleef_finz_getPtrf16(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512f(__m512d); 
+IMPORT CONST __m512d Sleef_finz_coshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512f(__m512d); +IMPORT CONST 
__m512d Sleef_finz_sqrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15avx512f(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512f(int); +IMPORT CONST void *Sleef_getPtrd8_avx512f(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 
Sleef_finz_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512f(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST 
__m512 Sleef_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16_avx512f(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16_avx512f(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15avx512f(__m512); +IMPORT CONST int Sleef_getIntf16_avx512f(int); +IMPORT CONST int Sleef_finz_getIntf16_avx512f(int); +IMPORT CONST void *Sleef_getPtrf16_avx512f(int); +IMPORT CONST void *Sleef_finz_getPtrf16_avx512f(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d 
Sleef_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m512d Sleef_cinz_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_cinz_modfd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrd8_avx512fnofma(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512fnofma(__m512, __m512); 
+IMPORT CONST __m512 Sleef_cinz_powf16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_coshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_coshf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fastsinf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fastcosf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fastpowf16_u3500avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acoshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp10f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expm1f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log1pf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u05avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinpif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cospif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512fnofma(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmaf16_avx512fnofma(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512fnofma(__m512); +IMPORT CONST __m512 
Sleef_cinz_sqrtf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sqrtf16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sqrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_hypotf16_u05avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_hypotf16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fabsf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_copysignf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmaxf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fminf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fdimf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_truncf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_floorf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_ceilf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_roundf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_rintf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_nextafterf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_frfrexpf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmodf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_remainderf16_avx512fnofma(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_modff16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_lgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_erff16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_erfcf16_u15avx512fnofma(__m512); +IMPORT CONST int Sleef_getIntf16_avx512fnofma(int); +IMPORT CONST int Sleef_cinz_getIntf16_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrf16_avx512fnofma(int); +IMPORT CONST void *Sleef_cinz_getPtrf16_avx512fnofma(int); +#endif +#ifdef __STDC__ + +#ifndef Sleef_double_2_DEFINED +typedef struct { + double x, y; +} Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +IMPORT CONST double Sleef_sind1_u35purec(double); +IMPORT CONST double Sleef_cinz_sind1_u35purec(double); +IMPORT CONST double 
Sleef_cosd1_u35purec(double); +IMPORT CONST double Sleef_cinz_cosd1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u35purec(double); +IMPORT CONST double Sleef_tand1_u35purec(double); +IMPORT CONST double Sleef_cinz_tand1_u35purec(double); +IMPORT CONST double Sleef_asind1_u35purec(double); +IMPORT CONST double Sleef_cinz_asind1_u35purec(double); +IMPORT CONST double Sleef_acosd1_u35purec(double); +IMPORT CONST double Sleef_cinz_acosd1_u35purec(double); +IMPORT CONST double Sleef_atand1_u35purec(double); +IMPORT CONST double Sleef_cinz_atand1_u35purec(double); +IMPORT CONST double Sleef_atan2d1_u35purec(double, double); +IMPORT CONST double Sleef_cinz_atan2d1_u35purec(double, double); +IMPORT CONST double Sleef_logd1_u35purec(double); +IMPORT CONST double Sleef_cinz_logd1_u35purec(double); +IMPORT CONST double Sleef_cbrtd1_u35purec(double); +IMPORT CONST double Sleef_cinz_cbrtd1_u35purec(double); +IMPORT CONST double Sleef_sind1_u10purec(double); +IMPORT CONST double Sleef_cinz_sind1_u10purec(double); +IMPORT CONST double Sleef_cosd1_u10purec(double); +IMPORT CONST double Sleef_cinz_cosd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u10purec(double); +IMPORT CONST double Sleef_tand1_u10purec(double); +IMPORT CONST double Sleef_cinz_tand1_u10purec(double); +IMPORT CONST double Sleef_asind1_u10purec(double); +IMPORT CONST double Sleef_cinz_asind1_u10purec(double); +IMPORT CONST double Sleef_acosd1_u10purec(double); +IMPORT CONST double Sleef_cinz_acosd1_u10purec(double); +IMPORT CONST double Sleef_atand1_u10purec(double); +IMPORT CONST double Sleef_cinz_atand1_u10purec(double); +IMPORT CONST double Sleef_atan2d1_u10purec(double, double); +IMPORT CONST double Sleef_cinz_atan2d1_u10purec(double, double); +IMPORT CONST double Sleef_logd1_u10purec(double); +IMPORT CONST double Sleef_cinz_logd1_u10purec(double); +IMPORT CONST double Sleef_cbrtd1_u10purec(double); +IMPORT CONST double Sleef_cinz_cbrtd1_u10purec(double); +IMPORT CONST double Sleef_expd1_u10purec(double); +IMPORT CONST double Sleef_cinz_expd1_u10purec(double); +IMPORT CONST double Sleef_powd1_u10purec(double, double); +IMPORT CONST double Sleef_cinz_powd1_u10purec(double, double); +IMPORT CONST double Sleef_sinhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_sinhd1_u10purec(double); +IMPORT CONST double Sleef_coshd1_u10purec(double); +IMPORT CONST double Sleef_cinz_coshd1_u10purec(double); +IMPORT CONST double Sleef_tanhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_tanhd1_u10purec(double); +IMPORT CONST double Sleef_sinhd1_u35purec(double); +IMPORT CONST double Sleef_cinz_sinhd1_u35purec(double); +IMPORT CONST double Sleef_coshd1_u35purec(double); +IMPORT CONST double Sleef_cinz_coshd1_u35purec(double); +IMPORT CONST double Sleef_tanhd1_u35purec(double); +IMPORT CONST double Sleef_cinz_tanhd1_u35purec(double); +IMPORT CONST double Sleef_fastsind1_u3500purec(double); +IMPORT CONST double Sleef_cinz_fastsind1_u3500purec(double); +IMPORT CONST double Sleef_fastcosd1_u3500purec(double); +IMPORT CONST double Sleef_cinz_fastcosd1_u3500purec(double); +IMPORT CONST double Sleef_fastpowd1_u3500purec(double, double); +IMPORT CONST double Sleef_cinz_fastpowd1_u3500purec(double, double); +IMPORT CONST double Sleef_asinhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_asinhd1_u10purec(double); +IMPORT CONST double Sleef_acoshd1_u10purec(double); +IMPORT CONST double 
Sleef_cinz_acoshd1_u10purec(double); +IMPORT CONST double Sleef_atanhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_atanhd1_u10purec(double); +IMPORT CONST double Sleef_exp2d1_u10purec(double); +IMPORT CONST double Sleef_cinz_exp2d1_u10purec(double); +IMPORT CONST double Sleef_exp2d1_u35purec(double); +IMPORT CONST double Sleef_cinz_exp2d1_u35purec(double); +IMPORT CONST double Sleef_exp10d1_u10purec(double); +IMPORT CONST double Sleef_cinz_exp10d1_u10purec(double); +IMPORT CONST double Sleef_exp10d1_u35purec(double); +IMPORT CONST double Sleef_cinz_exp10d1_u35purec(double); +IMPORT CONST double Sleef_expm1d1_u10purec(double); +IMPORT CONST double Sleef_cinz_expm1d1_u10purec(double); +IMPORT CONST double Sleef_log10d1_u10purec(double); +IMPORT CONST double Sleef_cinz_log10d1_u10purec(double); +IMPORT CONST double Sleef_log2d1_u10purec(double); +IMPORT CONST double Sleef_cinz_log2d1_u10purec(double); +IMPORT CONST double Sleef_log2d1_u35purec(double); +IMPORT CONST double Sleef_cinz_log2d1_u35purec(double); +IMPORT CONST double Sleef_log1pd1_u10purec(double); +IMPORT CONST double Sleef_cinz_log1pd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u05purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u35purec(double); +IMPORT CONST double Sleef_sinpid1_u05purec(double); +IMPORT CONST double Sleef_cinz_sinpid1_u05purec(double); +IMPORT CONST double Sleef_cospid1_u05purec(double); +IMPORT CONST double Sleef_cinz_cospid1_u05purec(double); +IMPORT CONST double Sleef_ldexpd1_purec(double, int32_t); +IMPORT CONST double Sleef_cinz_ldexpd1_purec(double, int32_t); +IMPORT CONST int32_t Sleef_ilogbd1_purec(double); +IMPORT CONST int32_t Sleef_cinz_ilogbd1_purec(double); +IMPORT CONST double Sleef_fmad1_purec(double, double, double); +IMPORT CONST double Sleef_cinz_fmad1_purec(double, double, double); +IMPORT CONST double Sleef_sqrtd1_purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_purec(double); +IMPORT CONST double Sleef_sqrtd1_u05purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_u05purec(double); +IMPORT CONST double Sleef_sqrtd1_u35purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_u35purec(double); +IMPORT CONST double Sleef_hypotd1_u05purec(double, double); +IMPORT CONST double Sleef_cinz_hypotd1_u05purec(double, double); +IMPORT CONST double Sleef_hypotd1_u35purec(double, double); +IMPORT CONST double Sleef_cinz_hypotd1_u35purec(double, double); +IMPORT CONST double Sleef_fabsd1_purec(double); +IMPORT CONST double Sleef_cinz_fabsd1_purec(double); +IMPORT CONST double Sleef_copysignd1_purec(double, double); +IMPORT CONST double Sleef_cinz_copysignd1_purec(double, double); +IMPORT CONST double Sleef_fmaxd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmaxd1_purec(double, double); +IMPORT CONST double Sleef_fmind1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmind1_purec(double, double); +IMPORT CONST double Sleef_fdimd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fdimd1_purec(double, double); +IMPORT CONST double Sleef_truncd1_purec(double); +IMPORT CONST double Sleef_cinz_truncd1_purec(double); +IMPORT CONST double Sleef_floord1_purec(double); +IMPORT CONST double Sleef_cinz_floord1_purec(double); +IMPORT CONST double Sleef_ceild1_purec(double); +IMPORT CONST double Sleef_cinz_ceild1_purec(double); +IMPORT CONST double Sleef_roundd1_purec(double); +IMPORT CONST double 
Sleef_cinz_roundd1_purec(double); +IMPORT CONST double Sleef_rintd1_purec(double); +IMPORT CONST double Sleef_cinz_rintd1_purec(double); +IMPORT CONST double Sleef_nextafterd1_purec(double, double); +IMPORT CONST double Sleef_cinz_nextafterd1_purec(double, double); +IMPORT CONST double Sleef_frfrexpd1_purec(double); +IMPORT CONST double Sleef_cinz_frfrexpd1_purec(double); +IMPORT CONST int32_t Sleef_expfrexpd1_purec(double); +IMPORT CONST int32_t Sleef_cinz_expfrexpd1_purec(double); +IMPORT CONST double Sleef_fmodd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmodd1_purec(double, double); +IMPORT CONST double Sleef_remainderd1_purec(double, double); +IMPORT CONST double Sleef_cinz_remainderd1_purec(double, double); +IMPORT CONST Sleef_double_2 Sleef_modfd1_purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_modfd1_purec(double); +IMPORT CONST double Sleef_lgammad1_u10purec(double); +IMPORT CONST double Sleef_cinz_lgammad1_u10purec(double); +IMPORT CONST double Sleef_tgammad1_u10purec(double); +IMPORT CONST double Sleef_cinz_tgammad1_u10purec(double); +IMPORT CONST double Sleef_erfd1_u10purec(double); +IMPORT CONST double Sleef_cinz_erfd1_u10purec(double); +IMPORT CONST double Sleef_erfcd1_u15purec(double); +IMPORT CONST double Sleef_cinz_erfcd1_u15purec(double); +IMPORT CONST int Sleef_getIntd1_purec(int); +IMPORT CONST void *Sleef_getPtrd1_purec(int); + +#ifndef Sleef_float_2_DEFINED +typedef struct { + float x, y; +} Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +IMPORT CONST float Sleef_sinf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sinf1_u35purec(float); +IMPORT CONST float Sleef_cosf1_u35purec(float); +IMPORT CONST float Sleef_cinz_cosf1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u35purec(float); +IMPORT CONST float Sleef_tanf1_u35purec(float); +IMPORT CONST float Sleef_cinz_tanf1_u35purec(float); +IMPORT CONST float Sleef_asinf1_u35purec(float); +IMPORT CONST float Sleef_cinz_asinf1_u35purec(float); +IMPORT CONST float Sleef_acosf1_u35purec(float); +IMPORT CONST float Sleef_cinz_acosf1_u35purec(float); +IMPORT CONST float Sleef_atanf1_u35purec(float); +IMPORT CONST float Sleef_cinz_atanf1_u35purec(float); +IMPORT CONST float Sleef_atan2f1_u35purec(float, float); +IMPORT CONST float Sleef_cinz_atan2f1_u35purec(float, float); +IMPORT CONST float Sleef_logf1_u35purec(float); +IMPORT CONST float Sleef_cinz_logf1_u35purec(float); +IMPORT CONST float Sleef_cbrtf1_u35purec(float); +IMPORT CONST float Sleef_cinz_cbrtf1_u35purec(float); +IMPORT CONST float Sleef_sinf1_u10purec(float); +IMPORT CONST float Sleef_cinz_sinf1_u10purec(float); +IMPORT CONST float Sleef_cosf1_u10purec(float); +IMPORT CONST float Sleef_cinz_cosf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u10purec(float); +IMPORT CONST float Sleef_tanf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tanf1_u10purec(float); +IMPORT CONST float Sleef_asinf1_u10purec(float); +IMPORT CONST float Sleef_cinz_asinf1_u10purec(float); +IMPORT CONST float Sleef_acosf1_u10purec(float); +IMPORT CONST float Sleef_cinz_acosf1_u10purec(float); +IMPORT CONST float Sleef_atanf1_u10purec(float); +IMPORT CONST float Sleef_cinz_atanf1_u10purec(float); +IMPORT CONST float Sleef_atan2f1_u10purec(float, float); +IMPORT CONST float Sleef_cinz_atan2f1_u10purec(float, float); +IMPORT CONST float Sleef_logf1_u10purec(float); +IMPORT CONST float 
Sleef_cinz_logf1_u10purec(float); +IMPORT CONST float Sleef_cbrtf1_u10purec(float); +IMPORT CONST float Sleef_cinz_cbrtf1_u10purec(float); +IMPORT CONST float Sleef_expf1_u10purec(float); +IMPORT CONST float Sleef_cinz_expf1_u10purec(float); +IMPORT CONST float Sleef_powf1_u10purec(float, float); +IMPORT CONST float Sleef_cinz_powf1_u10purec(float, float); +IMPORT CONST float Sleef_sinhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_sinhf1_u10purec(float); +IMPORT CONST float Sleef_coshf1_u10purec(float); +IMPORT CONST float Sleef_cinz_coshf1_u10purec(float); +IMPORT CONST float Sleef_tanhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tanhf1_u10purec(float); +IMPORT CONST float Sleef_sinhf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sinhf1_u35purec(float); +IMPORT CONST float Sleef_coshf1_u35purec(float); +IMPORT CONST float Sleef_cinz_coshf1_u35purec(float); +IMPORT CONST float Sleef_tanhf1_u35purec(float); +IMPORT CONST float Sleef_cinz_tanhf1_u35purec(float); +IMPORT CONST float Sleef_fastsinf1_u3500purec(float); +IMPORT CONST float Sleef_cinz_fastsinf1_u3500purec(float); +IMPORT CONST float Sleef_fastcosf1_u3500purec(float); +IMPORT CONST float Sleef_cinz_fastcosf1_u3500purec(float); +IMPORT CONST float Sleef_fastpowf1_u3500purec(float, float); +IMPORT CONST float Sleef_cinz_fastpowf1_u3500purec(float, float); +IMPORT CONST float Sleef_asinhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_asinhf1_u10purec(float); +IMPORT CONST float Sleef_acoshf1_u10purec(float); +IMPORT CONST float Sleef_cinz_acoshf1_u10purec(float); +IMPORT CONST float Sleef_atanhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_atanhf1_u10purec(float); +IMPORT CONST float Sleef_exp2f1_u10purec(float); +IMPORT CONST float Sleef_cinz_exp2f1_u10purec(float); +IMPORT CONST float Sleef_exp2f1_u35purec(float); +IMPORT CONST float Sleef_cinz_exp2f1_u35purec(float); +IMPORT CONST float Sleef_exp10f1_u10purec(float); +IMPORT CONST float Sleef_cinz_exp10f1_u10purec(float); +IMPORT CONST float Sleef_exp10f1_u35purec(float); +IMPORT CONST float Sleef_cinz_exp10f1_u35purec(float); +IMPORT CONST float Sleef_expm1f1_u10purec(float); +IMPORT CONST float Sleef_cinz_expm1f1_u10purec(float); +IMPORT CONST float Sleef_log10f1_u10purec(float); +IMPORT CONST float Sleef_cinz_log10f1_u10purec(float); +IMPORT CONST float Sleef_log2f1_u10purec(float); +IMPORT CONST float Sleef_cinz_log2f1_u10purec(float); +IMPORT CONST float Sleef_log2f1_u35purec(float); +IMPORT CONST float Sleef_cinz_log2f1_u35purec(float); +IMPORT CONST float Sleef_log1pf1_u10purec(float); +IMPORT CONST float Sleef_cinz_log1pf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u05purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u35purec(float); +IMPORT CONST float Sleef_sinpif1_u05purec(float); +IMPORT CONST float Sleef_cinz_sinpif1_u05purec(float); +IMPORT CONST float Sleef_cospif1_u05purec(float); +IMPORT CONST float Sleef_cinz_cospif1_u05purec(float); +IMPORT CONST float Sleef_fmaf1_purec(float, float, float); +IMPORT CONST float Sleef_cinz_fmaf1_purec(float, float, float); +IMPORT CONST float Sleef_sqrtf1_purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_purec(float); +IMPORT CONST float Sleef_sqrtf1_u05purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_u05purec(float); +IMPORT CONST float Sleef_sqrtf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_u35purec(float); +IMPORT CONST float 
Sleef_hypotf1_u05purec(float, float); +IMPORT CONST float Sleef_cinz_hypotf1_u05purec(float, float); +IMPORT CONST float Sleef_hypotf1_u35purec(float, float); +IMPORT CONST float Sleef_cinz_hypotf1_u35purec(float, float); +IMPORT CONST float Sleef_fabsf1_purec(float); +IMPORT CONST float Sleef_cinz_fabsf1_purec(float); +IMPORT CONST float Sleef_copysignf1_purec(float, float); +IMPORT CONST float Sleef_cinz_copysignf1_purec(float, float); +IMPORT CONST float Sleef_fmaxf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fmaxf1_purec(float, float); +IMPORT CONST float Sleef_fminf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fminf1_purec(float, float); +IMPORT CONST float Sleef_fdimf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fdimf1_purec(float, float); +IMPORT CONST float Sleef_truncf1_purec(float); +IMPORT CONST float Sleef_cinz_truncf1_purec(float); +IMPORT CONST float Sleef_floorf1_purec(float); +IMPORT CONST float Sleef_cinz_floorf1_purec(float); +IMPORT CONST float Sleef_ceilf1_purec(float); +IMPORT CONST float Sleef_cinz_ceilf1_purec(float); +IMPORT CONST float Sleef_roundf1_purec(float); +IMPORT CONST float Sleef_cinz_roundf1_purec(float); +IMPORT CONST float Sleef_rintf1_purec(float); +IMPORT CONST float Sleef_cinz_rintf1_purec(float); +IMPORT CONST float Sleef_nextafterf1_purec(float, float); +IMPORT CONST float Sleef_cinz_nextafterf1_purec(float, float); +IMPORT CONST float Sleef_frfrexpf1_purec(float); +IMPORT CONST float Sleef_cinz_frfrexpf1_purec(float); +IMPORT CONST float Sleef_fmodf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fmodf1_purec(float, float); +IMPORT CONST float Sleef_remainderf1_purec(float, float); +IMPORT CONST float Sleef_cinz_remainderf1_purec(float, float); +IMPORT CONST Sleef_float_2 Sleef_modff1_purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_modff1_purec(float); +IMPORT CONST float Sleef_lgammaf1_u10purec(float); +IMPORT CONST float Sleef_cinz_lgammaf1_u10purec(float); +IMPORT CONST float Sleef_tgammaf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tgammaf1_u10purec(float); +IMPORT CONST float Sleef_erff1_u10purec(float); +IMPORT CONST float Sleef_cinz_erff1_u10purec(float); +IMPORT CONST float Sleef_erfcf1_u15purec(float); +IMPORT CONST float Sleef_cinz_erfcf1_u15purec(float); +IMPORT CONST int Sleef_getIntf1_purec(int); +IMPORT CONST int Sleef_cinz_getIntf1_purec(int); +IMPORT CONST void *Sleef_getPtrf1_purec(int); +IMPORT CONST void *Sleef_cinz_getPtrf1_purec(int); +#endif +#ifdef FP_FAST_FMA + +#ifndef Sleef_double_2_DEFINED +typedef struct { + double x, y; +} Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +IMPORT CONST double Sleef_sind1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sind1_u35purecfma(double); +IMPORT CONST double Sleef_cosd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_cosd1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u35purecfma(double); +IMPORT CONST double Sleef_tand1_u35purecfma(double); +IMPORT CONST double Sleef_finz_tand1_u35purecfma(double); +IMPORT CONST double Sleef_asind1_u35purecfma(double); +IMPORT CONST double Sleef_finz_asind1_u35purecfma(double); +IMPORT CONST double Sleef_acosd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_acosd1_u35purecfma(double); +IMPORT CONST double Sleef_atand1_u35purecfma(double); +IMPORT CONST double Sleef_finz_atand1_u35purecfma(double); +IMPORT CONST double Sleef_atan2d1_u35purecfma(double, double); +IMPORT CONST double 
Sleef_finz_atan2d1_u35purecfma(double, double); +IMPORT CONST double Sleef_logd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_logd1_u35purecfma(double); +IMPORT CONST double Sleef_cbrtd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_cbrtd1_u35purecfma(double); +IMPORT CONST double Sleef_sind1_u10purecfma(double); +IMPORT CONST double Sleef_finz_sind1_u10purecfma(double); +IMPORT CONST double Sleef_cosd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_cosd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u10purecfma(double); +IMPORT CONST double Sleef_tand1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tand1_u10purecfma(double); +IMPORT CONST double Sleef_asind1_u10purecfma(double); +IMPORT CONST double Sleef_finz_asind1_u10purecfma(double); +IMPORT CONST double Sleef_acosd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_acosd1_u10purecfma(double); +IMPORT CONST double Sleef_atand1_u10purecfma(double); +IMPORT CONST double Sleef_finz_atand1_u10purecfma(double); +IMPORT CONST double Sleef_atan2d1_u10purecfma(double, double); +IMPORT CONST double Sleef_finz_atan2d1_u10purecfma(double, double); +IMPORT CONST double Sleef_logd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_logd1_u10purecfma(double); +IMPORT CONST double Sleef_cbrtd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_cbrtd1_u10purecfma(double); +IMPORT CONST double Sleef_expd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_expd1_u10purecfma(double); +IMPORT CONST double Sleef_powd1_u10purecfma(double, double); +IMPORT CONST double Sleef_finz_powd1_u10purecfma(double, double); +IMPORT CONST double Sleef_sinhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_sinhd1_u10purecfma(double); +IMPORT CONST double Sleef_coshd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_coshd1_u10purecfma(double); +IMPORT CONST double Sleef_tanhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tanhd1_u10purecfma(double); +IMPORT CONST double Sleef_sinhd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sinhd1_u35purecfma(double); +IMPORT CONST double Sleef_coshd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_coshd1_u35purecfma(double); +IMPORT CONST double Sleef_tanhd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_tanhd1_u35purecfma(double); +IMPORT CONST double Sleef_fastsind1_u3500purecfma(double); +IMPORT CONST double Sleef_finz_fastsind1_u3500purecfma(double); +IMPORT CONST double Sleef_fastcosd1_u3500purecfma(double); +IMPORT CONST double Sleef_finz_fastcosd1_u3500purecfma(double); +IMPORT CONST double Sleef_fastpowd1_u3500purecfma(double, double); +IMPORT CONST double Sleef_finz_fastpowd1_u3500purecfma(double, double); +IMPORT CONST double Sleef_asinhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_asinhd1_u10purecfma(double); +IMPORT CONST double Sleef_acoshd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_acoshd1_u10purecfma(double); +IMPORT CONST double Sleef_atanhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_atanhd1_u10purecfma(double); +IMPORT CONST double Sleef_exp2d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_exp2d1_u10purecfma(double); +IMPORT CONST double Sleef_exp2d1_u35purecfma(double); +IMPORT CONST double Sleef_finz_exp2d1_u35purecfma(double); +IMPORT CONST double Sleef_exp10d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_exp10d1_u10purecfma(double); +IMPORT CONST double Sleef_exp10d1_u35purecfma(double); +IMPORT CONST double 
Sleef_finz_exp10d1_u35purecfma(double); +IMPORT CONST double Sleef_expm1d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_expm1d1_u10purecfma(double); +IMPORT CONST double Sleef_log10d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log10d1_u10purecfma(double); +IMPORT CONST double Sleef_log2d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log2d1_u10purecfma(double); +IMPORT CONST double Sleef_log2d1_u35purecfma(double); +IMPORT CONST double Sleef_finz_log2d1_u35purecfma(double); +IMPORT CONST double Sleef_log1pd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log1pd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u05purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u35purecfma(double); +IMPORT CONST double Sleef_sinpid1_u05purecfma(double); +IMPORT CONST double Sleef_finz_sinpid1_u05purecfma(double); +IMPORT CONST double Sleef_cospid1_u05purecfma(double); +IMPORT CONST double Sleef_finz_cospid1_u05purecfma(double); +IMPORT CONST double Sleef_ldexpd1_purecfma(double, int32_t); +IMPORT CONST double Sleef_finz_ldexpd1_purecfma(double, int32_t); +IMPORT CONST int32_t Sleef_ilogbd1_purecfma(double); +IMPORT CONST int32_t Sleef_finz_ilogbd1_purecfma(double); +IMPORT CONST double Sleef_fmad1_purecfma(double, double, double); +IMPORT CONST double Sleef_finz_fmad1_purecfma(double, double, double); +IMPORT CONST double Sleef_sqrtd1_purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_purecfma(double); +IMPORT CONST double Sleef_sqrtd1_u05purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_u05purecfma(double); +IMPORT CONST double Sleef_sqrtd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_u35purecfma(double); +IMPORT CONST double Sleef_hypotd1_u05purecfma(double, double); +IMPORT CONST double Sleef_finz_hypotd1_u05purecfma(double, double); +IMPORT CONST double Sleef_hypotd1_u35purecfma(double, double); +IMPORT CONST double Sleef_finz_hypotd1_u35purecfma(double, double); +IMPORT CONST double Sleef_fabsd1_purecfma(double); +IMPORT CONST double Sleef_finz_fabsd1_purecfma(double); +IMPORT CONST double Sleef_copysignd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_copysignd1_purecfma(double, double); +IMPORT CONST double Sleef_fmaxd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmaxd1_purecfma(double, double); +IMPORT CONST double Sleef_fmind1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmind1_purecfma(double, double); +IMPORT CONST double Sleef_fdimd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fdimd1_purecfma(double, double); +IMPORT CONST double Sleef_truncd1_purecfma(double); +IMPORT CONST double Sleef_finz_truncd1_purecfma(double); +IMPORT CONST double Sleef_floord1_purecfma(double); +IMPORT CONST double Sleef_finz_floord1_purecfma(double); +IMPORT CONST double Sleef_ceild1_purecfma(double); +IMPORT CONST double Sleef_finz_ceild1_purecfma(double); +IMPORT CONST double Sleef_roundd1_purecfma(double); +IMPORT CONST double Sleef_finz_roundd1_purecfma(double); +IMPORT CONST double Sleef_rintd1_purecfma(double); +IMPORT CONST double Sleef_finz_rintd1_purecfma(double); +IMPORT CONST double Sleef_nextafterd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_nextafterd1_purecfma(double, double); +IMPORT CONST double Sleef_frfrexpd1_purecfma(double); +IMPORT CONST double Sleef_finz_frfrexpd1_purecfma(double); +IMPORT CONST 
int32_t Sleef_expfrexpd1_purecfma(double); +IMPORT CONST int32_t Sleef_finz_expfrexpd1_purecfma(double); +IMPORT CONST double Sleef_fmodd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmodd1_purecfma(double, double); +IMPORT CONST double Sleef_remainderd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_remainderd1_purecfma(double, double); +IMPORT CONST Sleef_double_2 Sleef_modfd1_purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_modfd1_purecfma(double); +IMPORT CONST double Sleef_lgammad1_u10purecfma(double); +IMPORT CONST double Sleef_finz_lgammad1_u10purecfma(double); +IMPORT CONST double Sleef_tgammad1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tgammad1_u10purecfma(double); +IMPORT CONST double Sleef_erfd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_erfd1_u10purecfma(double); +IMPORT CONST double Sleef_erfcd1_u15purecfma(double); +IMPORT CONST double Sleef_finz_erfcd1_u15purecfma(double); +IMPORT CONST int Sleef_getIntd1_purecfma(int); +IMPORT CONST void *Sleef_getPtrd1_purecfma(int); + +#ifndef Sleef_float_2_DEFINED +typedef struct { + float x, y; +} Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +IMPORT CONST float Sleef_sinf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sinf1_u35purecfma(float); +IMPORT CONST float Sleef_cosf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_cosf1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u35purecfma(float); +IMPORT CONST float Sleef_tanf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_tanf1_u35purecfma(float); +IMPORT CONST float Sleef_asinf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_asinf1_u35purecfma(float); +IMPORT CONST float Sleef_acosf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_acosf1_u35purecfma(float); +IMPORT CONST float Sleef_atanf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_atanf1_u35purecfma(float); +IMPORT CONST float Sleef_atan2f1_u35purecfma(float, float); +IMPORT CONST float Sleef_finz_atan2f1_u35purecfma(float, float); +IMPORT CONST float Sleef_logf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_logf1_u35purecfma(float); +IMPORT CONST float Sleef_cbrtf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_cbrtf1_u35purecfma(float); +IMPORT CONST float Sleef_sinf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_sinf1_u10purecfma(float); +IMPORT CONST float Sleef_cosf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_cosf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u10purecfma(float); +IMPORT CONST float Sleef_tanf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tanf1_u10purecfma(float); +IMPORT CONST float Sleef_asinf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_asinf1_u10purecfma(float); +IMPORT CONST float Sleef_acosf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_acosf1_u10purecfma(float); +IMPORT CONST float Sleef_atanf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_atanf1_u10purecfma(float); +IMPORT CONST float Sleef_atan2f1_u10purecfma(float, float); +IMPORT CONST float Sleef_finz_atan2f1_u10purecfma(float, float); +IMPORT CONST float Sleef_logf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_logf1_u10purecfma(float); +IMPORT CONST float Sleef_cbrtf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_cbrtf1_u10purecfma(float); +IMPORT CONST float Sleef_expf1_u10purecfma(float); +IMPORT CONST float 
Sleef_finz_expf1_u10purecfma(float); +IMPORT CONST float Sleef_powf1_u10purecfma(float, float); +IMPORT CONST float Sleef_finz_powf1_u10purecfma(float, float); +IMPORT CONST float Sleef_sinhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_sinhf1_u10purecfma(float); +IMPORT CONST float Sleef_coshf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_coshf1_u10purecfma(float); +IMPORT CONST float Sleef_tanhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tanhf1_u10purecfma(float); +IMPORT CONST float Sleef_sinhf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sinhf1_u35purecfma(float); +IMPORT CONST float Sleef_coshf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_coshf1_u35purecfma(float); +IMPORT CONST float Sleef_tanhf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_tanhf1_u35purecfma(float); +IMPORT CONST float Sleef_fastsinf1_u3500purecfma(float); +IMPORT CONST float Sleef_finz_fastsinf1_u3500purecfma(float); +IMPORT CONST float Sleef_fastcosf1_u3500purecfma(float); +IMPORT CONST float Sleef_finz_fastcosf1_u3500purecfma(float); +IMPORT CONST float Sleef_fastpowf1_u3500purecfma(float, float); +IMPORT CONST float Sleef_finz_fastpowf1_u3500purecfma(float, float); +IMPORT CONST float Sleef_asinhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_asinhf1_u10purecfma(float); +IMPORT CONST float Sleef_acoshf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_acoshf1_u10purecfma(float); +IMPORT CONST float Sleef_atanhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_atanhf1_u10purecfma(float); +IMPORT CONST float Sleef_exp2f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_exp2f1_u10purecfma(float); +IMPORT CONST float Sleef_exp2f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_exp2f1_u35purecfma(float); +IMPORT CONST float Sleef_exp10f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_exp10f1_u10purecfma(float); +IMPORT CONST float Sleef_exp10f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_exp10f1_u35purecfma(float); +IMPORT CONST float Sleef_expm1f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_expm1f1_u10purecfma(float); +IMPORT CONST float Sleef_log10f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log10f1_u10purecfma(float); +IMPORT CONST float Sleef_log2f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log2f1_u10purecfma(float); +IMPORT CONST float Sleef_log2f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_log2f1_u35purecfma(float); +IMPORT CONST float Sleef_log1pf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log1pf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u05purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u35purecfma(float); +IMPORT CONST float Sleef_sinpif1_u05purecfma(float); +IMPORT CONST float Sleef_finz_sinpif1_u05purecfma(float); +IMPORT CONST float Sleef_cospif1_u05purecfma(float); +IMPORT CONST float Sleef_finz_cospif1_u05purecfma(float); +IMPORT CONST float Sleef_fmaf1_purecfma(float, float, float); +IMPORT CONST float Sleef_finz_fmaf1_purecfma(float, float, float); +IMPORT CONST float Sleef_sqrtf1_purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_purecfma(float); +IMPORT CONST float Sleef_sqrtf1_u05purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_u05purecfma(float); +IMPORT CONST float Sleef_sqrtf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_u35purecfma(float); +IMPORT CONST float 
Sleef_hypotf1_u05purecfma(float, float); +IMPORT CONST float Sleef_finz_hypotf1_u05purecfma(float, float); +IMPORT CONST float Sleef_hypotf1_u35purecfma(float, float); +IMPORT CONST float Sleef_finz_hypotf1_u35purecfma(float, float); +IMPORT CONST float Sleef_fabsf1_purecfma(float); +IMPORT CONST float Sleef_finz_fabsf1_purecfma(float); +IMPORT CONST float Sleef_copysignf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_copysignf1_purecfma(float, float); +IMPORT CONST float Sleef_fmaxf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fmaxf1_purecfma(float, float); +IMPORT CONST float Sleef_fminf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fminf1_purecfma(float, float); +IMPORT CONST float Sleef_fdimf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fdimf1_purecfma(float, float); +IMPORT CONST float Sleef_truncf1_purecfma(float); +IMPORT CONST float Sleef_finz_truncf1_purecfma(float); +IMPORT CONST float Sleef_floorf1_purecfma(float); +IMPORT CONST float Sleef_finz_floorf1_purecfma(float); +IMPORT CONST float Sleef_ceilf1_purecfma(float); +IMPORT CONST float Sleef_finz_ceilf1_purecfma(float); +IMPORT CONST float Sleef_roundf1_purecfma(float); +IMPORT CONST float Sleef_finz_roundf1_purecfma(float); +IMPORT CONST float Sleef_rintf1_purecfma(float); +IMPORT CONST float Sleef_finz_rintf1_purecfma(float); +IMPORT CONST float Sleef_nextafterf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_nextafterf1_purecfma(float, float); +IMPORT CONST float Sleef_frfrexpf1_purecfma(float); +IMPORT CONST float Sleef_finz_frfrexpf1_purecfma(float); +IMPORT CONST float Sleef_fmodf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fmodf1_purecfma(float, float); +IMPORT CONST float Sleef_remainderf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_remainderf1_purecfma(float, float); +IMPORT CONST Sleef_float_2 Sleef_modff1_purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_modff1_purecfma(float); +IMPORT CONST float Sleef_lgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_lgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_tgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_erff1_u10purecfma(float); +IMPORT CONST float Sleef_finz_erff1_u10purecfma(float); +IMPORT CONST float Sleef_erfcf1_u15purecfma(float); +IMPORT CONST float Sleef_finz_erfcf1_u15purecfma(float); +IMPORT CONST int Sleef_getIntf1_purecfma(int); +IMPORT CONST int Sleef_finz_getIntf1_purecfma(int); +IMPORT CONST void *Sleef_getPtrf1_purecfma(int); +IMPORT CONST void *Sleef_finz_getPtrf1_purecfma(int); +#endif +#ifdef __cplusplus +} +#endif + +#undef IMPORT +#endif // #ifndef __SLEEF_H__ diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..665c38bf035d45eafc0575f76a49cacfb9169371 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Device.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct TORCH_API THPDevice { + PyObject_HEAD at::Device device; +}; + +TORCH_API extern PyTypeObject THPDeviceType; + +inline bool THPDevice_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPDeviceType; +} + +TORCH_API PyObject* THPDevice_New(const at::Device& device); + +TORCH_API void THPDevice_init(PyObject* module); diff --git 
a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h new file mode 100644 index 0000000000000000000000000000000000000000..3142eb97a000173c776c1c4604665c51a7ba20cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +constexpr int DTYPE_NAME_LEN = 64; + +struct TORCH_API THPDtype { + PyObject_HEAD at::ScalarType scalar_type; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[DTYPE_NAME_LEN + 1]; +}; + +TORCH_API extern PyTypeObject THPDtypeType; + +inline bool THPDtype_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPDtypeType; +} + +inline bool THPPythonScalarType_Check(PyObject* obj) { + return obj == (PyObject*)(&PyFloat_Type) || + obj == (PyObject*)(&PyBool_Type) || obj == (PyObject*)(&PyLong_Type); +} + +TORCH_API PyObject* THPDtype_New( + at::ScalarType scalar_type, + const std::string& name); + +void THPDtype_init(PyObject* module); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..6b8d923f40909be5f9e2ebc7e95ee69fb0a8842f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Exceptions.h @@ -0,0 +1,390 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(USE_DISTRIBUTED) && defined(USE_C10D) +#include +#endif + +static inline void PyErr_SetString(PyObject* type, const std::string& message) { + PyErr_SetString(type, message.c_str()); +} +/// NOTE [ Conversion Cpp Python Warning ] +/// The warning handler cannot set python warnings immediately +/// as it requires acquiring the GIL (potential deadlock) +/// and would need to cleanly exit if the warning raised a +/// python error. To solve this, we buffer the warnings and +/// process them when we go back to python. +/// This requires the two try/catch blocks below to handle the +/// following cases: +/// - If there is no Error raised in the inner try/catch, the +/// buffered warnings are processed as python warnings. +/// - If they don't raise an error, the function process with the +/// original return code. +/// - If any of them raise an error, the error is set (PyErr_*) and +/// the destructor will raise a cpp exception python_error() that +/// will be caught by the outer try/catch that will be able to change +/// the return value of the function to reflect the error. +/// - If an Error was raised in the inner try/catch, the inner try/catch +/// must set the python error. The buffered warnings are then +/// processed as cpp warnings as we cannot predict before hand +/// whether a python warning will raise an error or not and we +/// cannot handle two errors at the same time. +/// This advanced handler will only be used in the current thread. +/// If any other thread is used, warnings will be processed as +/// cpp warnings. +#define HANDLE_TH_ERRORS \ + try { \ + torch::PyWarningHandler __enforce_warning_buffer; \ + try { +#define _CATCH_GENERIC_ERROR(ErrorType, PythonErrorType, retstmnt) \ + catch (const c10::ErrorType& e) { \ + auto msg = torch::get_cpp_stacktraces_enabled() \ + ? 
e.what() \ + : e.what_without_backtrace(); \ + PyErr_SetString(PythonErrorType, torch::processErrorMsg(msg)); \ + retstmnt; \ + } + +// Only catch torch-specific exceptions +#define CATCH_CORE_ERRORS(retstmnt) \ + catch (python_error & e) { \ + e.restore(); \ + retstmnt; \ + } \ + catch (py::error_already_set & e) { \ + e.restore(); \ + retstmnt; \ + } \ + _CATCH_GENERIC_ERROR(IndexError, PyExc_IndexError, retstmnt) \ + _CATCH_GENERIC_ERROR(ValueError, PyExc_ValueError, retstmnt) \ + _CATCH_GENERIC_ERROR(TypeError, PyExc_TypeError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + NotImplementedError, PyExc_NotImplementedError, retstmnt) \ + _CATCH_GENERIC_ERROR(LinAlgError, THPException_LinAlgError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + OutOfMemoryError, THPException_OutOfMemoryError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + DistBackendError, THPException_DistBackendError, retstmnt) \ + _CATCH_GENERIC_ERROR( \ + DistNetworkError, THPException_DistNetworkError, retstmnt) \ + _CATCH_GENERIC_ERROR(DistStoreError, THPException_DistStoreError, retstmnt) \ + _CATCH_GENERIC_ERROR(DistError, THPException_DistError, retstmnt) \ + _CATCH_GENERIC_ERROR(Error, PyExc_RuntimeError, retstmnt) \ + catch (torch::PyTorchError & e) { \ + auto msg = torch::processErrorMsg(e.what()); \ + PyErr_SetString(e.python_type(), msg); \ + retstmnt; \ + } + +#define CATCH_TH_ERRORS(retstmnt) CATCH_CORE_ERRORS(retstmnt) + +#define CATCH_ALL_ERRORS(retstmnt) \ + CATCH_TH_ERRORS(retstmnt) \ + catch (const std::exception& e) { \ + auto msg = torch::processErrorMsg(e.what()); \ + PyErr_SetString(PyExc_RuntimeError, msg); \ + retstmnt; \ + } + +#define END_HANDLE_TH_ERRORS_PYBIND \ + } \ + catch (...) { \ + __enforce_warning_buffer.set_in_exception(); \ + throw; \ + } \ + } \ + catch (py::error_already_set & e) { \ + throw; \ + } \ + catch (py::builtin_exception & e) { \ + throw; \ + } \ + catch (torch::jit::JITException & e) { \ + throw; \ + } \ + catch (const std::exception& e) { \ + torch::translate_exception_to_python(std::current_exception()); \ + throw py::error_already_set(); \ + } + +#define END_HANDLE_TH_ERRORS_RET(retval) \ + } \ + catch (...) { \ + __enforce_warning_buffer.set_in_exception(); \ + throw; \ + } \ + } \ + catch (const std::exception& e) { \ + torch::translate_exception_to_python(std::current_exception()); \ + return retval; \ + } + +#define END_HANDLE_TH_ERRORS END_HANDLE_TH_ERRORS_RET(nullptr) + +extern PyObject *THPException_FatalError, *THPException_LinAlgError, + *THPException_OutOfMemoryError, *THPException_DistError, + *THPException_DistBackendError, *THPException_DistNetworkError, + *THPException_DistStoreError; + +// Throwing this exception means that the python error flags have been already +// set and control should be immediately returned to the interpreter. 
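A minimal usage sketch of the HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS macros declared above, assuming <torch/csrc/Exceptions.h> and the Python C API are included; THPExample_double is a hypothetical binding, not part of the vendored header.

#include <torch/csrc/Exceptions.h>   // HANDLE_TH_ERRORS / END_HANDLE_TH_ERRORS, torch::TypeError

// Hypothetical CPython binding: doubles a Python int, translating C++ errors to Python.
static PyObject* THPExample_double(PyObject* /*self*/, PyObject* arg) {
  HANDLE_TH_ERRORS                         // opens the warning buffer and the nested try blocks
  if (!PyLong_Check(arg)) {
    // torch::TypeError is mapped to a Python TypeError by the CATCH macros
    throw torch::TypeError("expected int, got %s", Py_TYPE(arg)->tp_name);
  }
  const long long v = PyLong_AsLongLong(arg);
  TORCH_CHECK(v >= 0, "value must be non-negative");  // c10::Error -> Python RuntimeError
  return PyLong_FromLongLong(2 * v);
  END_HANDLE_TH_ERRORS                     // on error: sets the Python error state and returns nullptr
}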
+struct python_error : public std::exception { + python_error() = default; + + python_error(const python_error& other) + : type(other.type), + value(other.value), + traceback(other.traceback), + message(other.message) { + pybind11::gil_scoped_acquire gil; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(traceback); + } + + python_error(python_error&& other) noexcept + : type(other.type), + value(other.value), + traceback(other.traceback), + message(std::move(other.message)) { + other.type = nullptr; + other.value = nullptr; + other.traceback = nullptr; + } + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~python_error() override { + if (type || value || traceback) { + pybind11::gil_scoped_acquire gil; + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + } + } + + const char* what() const noexcept override { + return message.c_str(); + } + + void build_message() { + // Ensure we have the GIL. + pybind11::gil_scoped_acquire gil; + + // No errors should be set when we enter the function since PyErr_Fetch + // clears the error indicator. + TORCH_INTERNAL_ASSERT(!PyErr_Occurred()); + + // Default message. + message = "python_error"; + + // Try to retrieve the error message from the value. + if (value != nullptr) { + // Reference count should not be zero. + TORCH_INTERNAL_ASSERT(Py_REFCNT(value) > 0); + + PyObject* pyStr = PyObject_Str(value); + if (pyStr != nullptr) { + PyObject* encodedString = + PyUnicode_AsEncodedString(pyStr, "utf-8", "strict"); + if (encodedString != nullptr) { + char* bytes = PyBytes_AS_STRING(encodedString); + if (bytes != nullptr) { + // Set the message. + message = std::string(bytes); + } + Py_XDECREF(encodedString); + } + Py_XDECREF(pyStr); + } + } + + // Clear any errors since we don't want to propagate errors for functions + // that are trying to build a string for the error message. + PyErr_Clear(); + } + + /** Saves the exception so that it can be re-thrown on a different thread */ + inline void persist() { + if (type) + return; // Don't overwrite exceptions + // PyErr_Fetch overwrites the pointers + pybind11::gil_scoped_acquire gil; + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + PyErr_Fetch(&type, &value, &traceback); + build_message(); + } + + /** Sets the current Python error from this exception */ + inline void restore() { + if (!type) + return; + // PyErr_Restore steals references + pybind11::gil_scoped_acquire gil; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(traceback); + PyErr_Restore(type, value, traceback); + } + + PyObject* type{nullptr}; + PyObject* value{nullptr}; + PyObject* traceback{nullptr}; + + // Message to return to the user when 'what()' is invoked. 
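A sketch of the persist()/restore() round trip described above, for carrying a Python error from a worker thread back to the thread that returns to the interpreter; run_python_callback and finish are hypothetical helpers and the ownership scheme is only illustrative.

// Hypothetical helpers built on python_error::persist()/restore().
static python_error saved;               // holds a captured Python exception

void run_python_callback(PyObject* callable) {
  pybind11::gil_scoped_acquire gil;
  PyObject* result = PyObject_CallObject(callable, /*args=*/nullptr);
  if (result == nullptr) {
    saved.persist();                     // PyErr_Fetch()es the current error into `saved`
  } else {
    Py_DECREF(result);
  }
}

PyObject* finish() {                     // later, where a Python error may be reported
  if (saved.type != nullptr) {
    saved.restore();                     // PyErr_Restore()s the saved error
    return nullptr;                      // signal the error to the interpreter
  }
  Py_RETURN_NONE;
}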
+ std::string message; +}; + +bool THPException_init(PyObject* module); + +namespace torch { + +// Set python current exception from a C++ exception +TORCH_PYTHON_API void translate_exception_to_python(const std::exception_ptr&); + +TORCH_PYTHON_API std::string processErrorMsg(std::string str); + +// Abstract base class for exceptions which translate to specific Python types +struct PyTorchError : public std::exception { + PyTorchError() = default; + PyTorchError(std::string msg_) : msg(std::move(msg_)) {} + virtual PyObject* python_type() = 0; + const char* what() const noexcept override { + return msg.c_str(); + } + std::string msg; +}; + +// Declare a printf-like function on gcc & clang +// The compiler can then warn on invalid format specifiers +#ifdef __GNUC__ +#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) \ + __attribute__((format(printf, FORMAT_INDEX, VA_ARGS_INDEX))) +#else +#define TORCH_FORMAT_FUNC(FORMAT_INDEX, VA_ARGS_INDEX) +#endif + +// Translates to Python TypeError +struct TypeError : public PyTorchError { + using PyTorchError::PyTorchError; + TORCH_PYTHON_API TypeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_TypeError; + } +}; + +// Translates to Python AttributeError +struct AttributeError : public PyTorchError { + AttributeError(const char* format, ...) TORCH_FORMAT_FUNC(2, 3); + PyObject* python_type() override { + return PyExc_AttributeError; + } +}; + +// ATen warning handler for Python +struct PyWarningHandler { + // Move actual handler into a separate class with a noexcept + // destructor. Otherwise, we need to force all WarningHandler + // subclasses to have a noexcept(false) destructor. + struct InternalHandler : at::WarningHandler { + ~InternalHandler() override = default; + void process(const c10::Warning& warning) override; + + std::vector warning_buffer_; + }; + + public: + /// See NOTE [ Conversion Cpp Python Warning ] for noexcept justification + TORCH_PYTHON_API PyWarningHandler() noexcept(true); + // NOLINTNEXTLINE(bugprone-exception-escape) + TORCH_PYTHON_API ~PyWarningHandler() noexcept(false); + + /** Call if an exception has been thrown + + * Necessary to determine if it is safe to throw from the desctructor since + * std::uncaught_exception is buggy on some platforms and generally + * unreliable across dynamic library calls. + */ + void set_in_exception() { + in_exception_ = true; + } + + private: + InternalHandler internal_handler_; + at::WarningHandler* prev_handler_; + bool in_exception_; +}; + +namespace detail { + +struct noop_gil_scoped_release { + // user-defined constructor (i.e. not defaulted) to avoid + // unused-variable warnings at usage sites of this class + noop_gil_scoped_release() {} +}; + +template +using conditional_gil_scoped_release = std::conditional_t< + release_gil, + pybind11::gil_scoped_release, + noop_gil_scoped_release>; + +template +using Arg = typename invoke_traits::template arg::type; + +template +auto wrap_pybind_function_impl_( + // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) + Func&& f, + std::index_sequence, + std::bool_constant) { + namespace py = pybind11; + + // f=f is needed to handle function references on older compilers + return [f = std::forward(f)](Arg... args) { + HANDLE_TH_ERRORS + conditional_gil_scoped_release no_gil; + return c10::guts::invoke(f, std::forward>(args)...); + END_HANDLE_TH_ERRORS_PYBIND + }; +} +} // namespace detail + +// Wrap a function with TH error and warning handling. 
+// Returns a function object suitable for registering with pybind11. +template +auto wrap_pybind_function(Func&& f) { + using traits = invoke_traits; + return torch::detail::wrap_pybind_function_impl_( + std::forward(f), + std::make_index_sequence{}, + std::false_type{}); +} + +// Wrap a function with TH error, warning handling and releases the GIL. +// Returns a function object suitable for registering with pybind11. +template +auto wrap_pybind_function_no_gil(Func&& f) { + using traits = invoke_traits; + return torch::detail::wrap_pybind_function_impl_( + std::forward(f), + std::make_index_sequence{}, + std::true_type{}); +} + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h new file mode 100644 index 0000000000000000000000000000000000000000..f5b7b4661eb5851ac77a6dc25192b65a6e125b0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Generator.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPGenerator { + PyObject_HEAD at::Generator cdata; +}; + +// Creates a new Python object wrapping the default at::Generator. The reference +// is borrowed. The caller should ensure that the at::Generator object lifetime +// last at least as long as the Python wrapper. +TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator( + at::Generator cdata); + +#define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass) + +TORCH_PYTHON_API extern PyObject* THPGeneratorClass; + +bool THPGenerator_init(PyObject* module); + +TORCH_PYTHON_API PyObject* THPGenerator_Wrap(at::Generator gen); + +// Creates a new Python object for a Generator. The Generator must not already +// have a PyObject* associated with it. 
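Stepping back to the wrap_pybind_function / wrap_pybind_function_no_gil helpers from Exceptions.h above, a hedged sketch of how a pybind11 binding might use them; clamped_add and init_example_bindings are hypothetical names.

#include <cstdint>
#include <pybind11/pybind11.h>
#include <torch/csrc/Exceptions.h>

// Hypothetical free function exposed to Python with torch's error translation.
int64_t clamped_add(int64_t a, int64_t b) {
  TORCH_CHECK(a >= 0 && b >= 0, "expected non-negative operands");
  return a + b;
}

// Hypothetical module-init hook. wrap_pybind_function adds the HANDLE_TH_ERRORS-style
// handling; wrap_pybind_function_no_gil additionally releases the GIL around the call.
void init_example_bindings(pybind11::module_& m) {
  m.def("clamped_add", torch::wrap_pybind_function(clamped_add));
  m.def("clamped_add_nogil", torch::wrap_pybind_function_no_gil(clamped_add));
}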
+PyObject* THPGenerator_NewWithVar(PyTypeObject* type, at::Generator gen); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..7f60a0ba0282c39cb8c72876a4288560ec280b93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/MemoryFormat.h @@ -0,0 +1,27 @@ +#pragma once + +#include + +#include + +#include + +const int MEMORY_FORMAT_NAME_LEN = 64; + +struct THPMemoryFormat { + PyObject_HEAD at::MemoryFormat memory_format; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[MEMORY_FORMAT_NAME_LEN + 1]; +}; + +extern PyTypeObject THPMemoryFormatType; + +inline bool THPMemoryFormat_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPMemoryFormatType; +} + +PyObject* THPMemoryFormat_New( + at::MemoryFormat memory_format, + const std::string& name); + +void THPMemoryFormat_init(PyObject* module); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h new file mode 100644 index 0000000000000000000000000000000000000000..71ff8c4fcb85e2c9e55fb4c0660ef506b6fda6e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Module.h @@ -0,0 +1,6 @@ +#ifndef THP_MODULE_INC +#define THP_MODULE_INC + +#define THP_STATELESS_ATTRIBUTE_NAME "_torch" + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..30809ff10be90e2d091002ce4c2abb8e731b8d0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h @@ -0,0 +1,7 @@ +#pragma once + +#include +#include + +TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter(); +TORCH_PYTHON_API bool isMainPyInterpreter(); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h new file mode 100644 index 0000000000000000000000000000000000000000..fcb75304c0ed0bf885a058e6f08d0cc8fe23ec3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include + +#include + +constexpr int QSCHEME_NAME_LEN = 64; + +struct THPQScheme { + PyObject_HEAD at::QScheme qscheme; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[QSCHEME_NAME_LEN + 1]; +}; + +extern PyTypeObject THPQSchemeType; + +inline bool THPQScheme_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPQSchemeType; +} + +PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name); + +void THPQScheme_init(PyObject* module); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h new file mode 100644 index 0000000000000000000000000000000000000000..dd4283f7d77234cbd3dac815456981b22a4dad00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Size.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +extern PyTypeObject THPSizeType; + +#define THPSize_Check(obj) (Py_TYPE(obj) == &THPSizeType) + +PyObject* THPSize_New(const torch::autograd::Variable& t); +PyObject* THPSize_NewFromSizes(int64_t dim, const 
int64_t* sizes); +PyObject* THPSize_NewFromSymSizes(const at::Tensor& t); + +void THPSize_init(PyObject* module); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h new file mode 100644 index 0000000000000000000000000000000000000000..16bf87bbcc2ea2076d07eb9d2c612e7082864ed5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Storage.h @@ -0,0 +1,60 @@ +#ifndef THP_STORAGE_INC +#define THP_STORAGE_INC + +#include +#include +#include +#include +#include + +#define THPStorageStr "torch.UntypedStorage" + +struct THPStorage { + PyObject_HEAD; + c10::MaybeOwned cdata; + bool is_hermetic; +}; + +TORCH_PYTHON_API PyObject* THPStorage_Wrap(c10::Storage storage); +TORCH_PYTHON_API PyObject* THPStorage_NewWithStorage( + PyTypeObject* type, + c10::Storage _storage, + c10::impl::PyInterpreterStatus status, + bool allow_preexisting_pyobj = false); +extern PyTypeObject* THPStorageClass; + +static inline bool THPStorage_CheckTypeExact(PyTypeObject* tp) { + return tp == THPStorageClass; +} + +static inline bool THPStorage_CheckExact(PyObject* obj) { + return THPStorage_CheckTypeExact(Py_TYPE(obj)); +} + +inline bool THPStorage_Check(PyObject* obj) { + if (!THPStorageClass) + return false; + + const auto result = PyObject_IsInstance(obj, (PyObject*)THPStorageClass); + if (result == -1) + throw python_error(); + return result; +} + +bool THPStorage_init(PyObject* module); +void THPStorage_postInit(PyObject* module); + +void THPStorage_assertNotNull(THPStorage* storage); +void THPStorage_assertNotNull(PyObject* obj); + +extern PyTypeObject THPStorageType; + +inline const c10::Storage& THPStorage_Unpack(THPStorage* storage) { + return *storage->cdata; +} + +inline const c10::Storage& THPStorage_Unpack(PyObject* obj) { + return THPStorage_Unpack(reinterpret_cast(obj)); +} + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h new file mode 100644 index 0000000000000000000000000000000000000000..bd0825fa30142ba7101510765b1b230142ab4f0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/StorageMethods.h @@ -0,0 +1,8 @@ +#ifndef THP_STORAGE_METHODS_INC +#define THP_STORAGE_METHODS_INC + +#include + +PyMethodDef* THPStorage_getMethods(); + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h new file mode 100644 index 0000000000000000000000000000000000000000..803abf1832f000084c8e55cf147c51fbc511a0cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h @@ -0,0 +1,8 @@ +#ifndef THP_STORAGE_SHARING_INC +#define THP_STORAGE_SHARING_INC + +#include + +PyMethodDef* THPStorage_getSharingMethods(); + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..91f1abe0516ce5555a8460e6fca232bd518e8ad0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h @@ -0,0 +1,23 @@ +#ifndef THP_STREAM_INC +#define THP_STREAM_INC + +#include +#include +#include + +struct THPStream { + PyObject_HEAD int64_t stream_id; + int64_t device_type; + int64_t device_index; +}; +extern TORCH_API PyTypeObject* 
THPStreamClass; + +void THPStream_init(PyObject* module); + +inline bool THPStream_Check(PyObject* obj) { + return THPStreamClass && PyObject_IsInstance(obj, (PyObject*)THPStreamClass); +} + +PyObject* THPStream_Wrap(const c10::Stream& stream); + +#endif // THP_STREAM_INC diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h new file mode 100644 index 0000000000000000000000000000000000000000..97d12e4eea5c6bbea483a1e2ebfd1a1ed7065411 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h @@ -0,0 +1,26 @@ +#pragma once + +#include + +#include + +struct THPDTypeInfo { + PyObject_HEAD at::ScalarType type; +}; + +struct THPFInfo : THPDTypeInfo {}; + +struct THPIInfo : THPDTypeInfo {}; + +extern PyTypeObject THPFInfoType; +extern PyTypeObject THPIInfoType; + +inline bool THPFInfo_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPFInfoType; +} + +inline bool THPIInfo_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPIInfoType; +} + +void THPDTypeInfo_init(PyObject* module); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h new file mode 100644 index 0000000000000000000000000000000000000000..01a20cb01dff68de245f3c8b16ca1914beb76a1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/Types.h @@ -0,0 +1,13 @@ +#ifndef THP_TYPES_INC +#define THP_TYPES_INC + +#include + +#ifndef INT64_MAX +#include +#endif + +template +struct THPTypeInfo {}; + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..4d7b689016bd9e5fc5864b6480ed6b1188ed0372 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/copy_utils.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include +#include +#include + +typedef std::function THPCopyFunction; +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPCopyInfo { + PyTypeObject* srcType; // Python type of src tensor/storage + THPCopyFunction copy; // copy function + bool non_blocking; // true if copy implements an 'non_blocking' copy + bool broadcast; // true if the copy implements a broadcast copy +}; +typedef std::vector THPCopyList; + +inline bool tryTHPCopy( + const THPCopyList& v, + PyObject* dst, + PyObject* src, + bool non_blocking, + bool broadcast) { + for (auto& i : v) { + if (i.non_blocking == non_blocking && + PyType_IsSubtype(Py_TYPE(src), i.srcType)) { + (i.copy)(dst, src, broadcast); + return true; + } + } + return false; +} + +inline bool THPCopy( + const THPCopyList& v, + PyObject* dst, + PyObject* src, + bool non_blocking, + bool broadcast) { + // NOLINTNEXTLINE(bugprone-branch-clone) + if (tryTHPCopy(v, dst, src, non_blocking, broadcast)) { + return true; + } else if (non_blocking && tryTHPCopy(v, dst, src, false, broadcast)) { + return true; + } + THPUtils_setError( + "copy from %s to %s isn't implemented", + THPUtils_typename(src), + THPUtils_typename(dst)); + return false; +} diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..502af374ff3d34218927b0d066b692d494a397d5 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/itt_wrapper.h @@ -0,0 +1,12 @@ +#ifndef PROFILER_ITT_H +#define PROFILER_ITT_H +#include + +namespace torch::profiler { +TORCH_API bool itt_is_available(); +TORCH_API void itt_range_push(const char* msg); +TORCH_API void itt_range_pop(); +TORCH_API void itt_mark(const char* msg); +} // namespace torch::profiler + +#endif // PROFILER_ITT_H diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..01a6007e9f8e824a1dae904d63cc01ef091b03af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/python_dimname.h @@ -0,0 +1,7 @@ +#pragma once +#include +#include + +at::Dimname THPDimname_parse(PyObject* obj); +bool THPUtils_checkDimname(PyObject* obj); +bool THPUtils_checkDimnameList(PyObject* obj); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h new file mode 100644 index 0000000000000000000000000000000000000000..0130e41ccb46edf3ab5d5a35c80607383acbddf8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/python_headers.h @@ -0,0 +1,25 @@ +#pragma once +// workaround for https://github.com/python/cpython/pull/23326 +#include +#include +// workaround for Python 2 issue: https://bugs.python.org/issue17120 +// NOTE: It looks like this affects Python 3 as well. +#pragma push_macro("_XOPEN_SOURCE") +#pragma push_macro("_POSIX_C_SOURCE") +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE + +#include +#include +#include + +#pragma pop_macro("_XOPEN_SOURCE") +#pragma pop_macro("_POSIX_C_SOURCE") + +#ifdef copysign +#undef copysign +#endif + +#if PY_MAJOR_VERSION < 3 +#error "Python 2 has reached end-of-life and is no longer supported by PyTorch." +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..3e10784c2459679bb1bed442782d0fa51cc88b2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/serialization.h @@ -0,0 +1,27 @@ +#ifndef THP_SERIALIZATION_INC +#define THP_SERIALIZATION_INC + +#include +#include +template +void doRead(io fildes, void* buf, size_t nbytes); + +template +void doWrite(io fildes, void* buf, size_t nbytes); + +// Note that this takes a mutable storage because it may pass through +// to at::from_blob. 
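For the torch::profiler ITT hooks declared in itt_wrapper.h above, a small illustrative sketch of annotating a region so it appears when profiling under Intel VTune; run_training_step is a hypothetical function.

#include <torch/csrc/itt_wrapper.h>

void run_training_step() {
  const bool itt = torch::profiler::itt_is_available();
  if (itt) {
    torch::profiler::itt_range_push("training_step");  // open a named range
  }
  // ... model forward/backward/optimizer work ...
  if (itt) {
    torch::profiler::itt_mark("step_done");            // point marker inside the range
    torch::profiler::itt_range_pop();                  // close the range
  }
}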
+template +void THPStorage_writeFileRaw( + c10::StorageImpl* self, + io fd, + bool save_size, + uint64_t element_size); + +template +c10::intrusive_ptr THPStorage_readFileRaw( + io fd, + c10::intrusive_ptr storage, + uint64_t element_size); + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..5a610c28d2b1e77f236591f39adc43057dc0c18b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils.h @@ -0,0 +1,217 @@ +#ifndef THP_UTILS_H +#define THP_UTILS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef USE_CUDA +#include +#endif + +#define THPUtils_(NAME) TH_CONCAT_4(THP, Real, Utils_, NAME) + +#define THPUtils_typename(obj) (Py_TYPE(obj)->tp_name) + +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define THP_EXPECT(x, y) (__builtin_expect((x), (y))) +#else +#define THP_EXPECT(x, y) (x) +#endif + +#define THPUtils_checkReal_FLOAT(object) \ + (PyFloat_Check(object) || PyLong_Check(object)) + +#define THPUtils_unpackReal_FLOAT(object) \ + (PyFloat_Check(object) ? PyFloat_AsDouble(object) \ + : PyLong_Check(object) \ + ? PyLong_AsLongLong(object) \ + : (throw std::runtime_error("Could not parse real"), 0)) + +#define THPUtils_checkReal_INT(object) PyLong_Check(object) + +#define THPUtils_unpackReal_INT(object) \ + (PyLong_Check(object) \ + ? PyLong_AsLongLong(object) \ + : (throw std::runtime_error("Could not parse real"), 0)) + +#define THPUtils_unpackReal_BOOL(object) \ + (PyBool_Check(object) \ + ? object \ + : (throw std::runtime_error("Could not parse real"), Py_False)) + +#define THPUtils_unpackReal_COMPLEX(object) \ + (PyComplex_Check(object) \ + ? (c10::complex( \ + PyComplex_RealAsDouble(object), PyComplex_ImagAsDouble(object))) \ + : PyFloat_Check(object) \ + ? (c10::complex(PyFloat_AsDouble(object), 0)) \ + : PyLong_Check(object) \ + ? 
(c10::complex(PyLong_AsLongLong(object), 0)) \ + : (throw std::runtime_error("Could not parse real"), \ + c10::complex(0, 0))) + +#define THPUtils_checkReal_BOOL(object) PyBool_Check(object) + +#define THPUtils_checkReal_COMPLEX(object) \ + PyComplex_Check(object) || PyFloat_Check(object) || PyLong_Check(object) || \ + PyInt_Check(object) + +#define THPUtils_newReal_FLOAT(value) PyFloat_FromDouble(value) +#define THPUtils_newReal_INT(value) PyInt_FromLong(value) + +#define THPUtils_newReal_BOOL(value) PyBool_FromLong(value) + +#define THPUtils_newReal_COMPLEX(value) \ + PyComplex_FromDoubles(value.real(), value.imag()) + +#define THPDoubleUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPDoubleUtils_unpackReal(object) \ + (double)THPUtils_unpackReal_FLOAT(object) +#define THPDoubleUtils_newReal(value) THPUtils_newReal_FLOAT(value) +#define THPFloatUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPFloatUtils_unpackReal(object) \ + (float)THPUtils_unpackReal_FLOAT(object) +#define THPFloatUtils_newReal(value) THPUtils_newReal_FLOAT(value) +#define THPHalfUtils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPHalfUtils_unpackReal(object) \ + (at::Half) THPUtils_unpackReal_FLOAT(object) +#define THPHalfUtils_newReal(value) PyFloat_FromDouble(value) +#define THPHalfUtils_newAccreal(value) THPUtils_newReal_FLOAT(value) +#define THPComplexDoubleUtils_checkReal(object) \ + THPUtils_checkReal_COMPLEX(object) +#define THPComplexDoubleUtils_unpackReal(object) \ + THPUtils_unpackReal_COMPLEX(object) +#define THPComplexDoubleUtils_newReal(value) THPUtils_newReal_COMPLEX(value) +#define THPComplexFloatUtils_checkReal(object) \ + THPUtils_checkReal_COMPLEX(object) +#define THPComplexFloatUtils_unpackReal(object) \ + (c10::complex)THPUtils_unpackReal_COMPLEX(object) +#define THPComplexFloatUtils_newReal(value) THPUtils_newReal_COMPLEX(value) +#define THPBFloat16Utils_checkReal(object) THPUtils_checkReal_FLOAT(object) +#define THPBFloat16Utils_unpackReal(object) \ + (at::BFloat16) THPUtils_unpackReal_FLOAT(object) +#define THPBFloat16Utils_newReal(value) PyFloat_FromDouble(value) +#define THPBFloat16Utils_newAccreal(value) THPUtils_newReal_FLOAT(value) + +#define THPBoolUtils_checkReal(object) THPUtils_checkReal_BOOL(object) +#define THPBoolUtils_unpackReal(object) THPUtils_unpackReal_BOOL(object) +#define THPBoolUtils_newReal(value) THPUtils_newReal_BOOL(value) +#define THPBoolUtils_checkAccreal(object) THPUtils_checkReal_BOOL(object) +#define THPBoolUtils_unpackAccreal(object) \ + (int64_t) THPUtils_unpackReal_BOOL(object) +#define THPBoolUtils_newAccreal(value) THPUtils_newReal_BOOL(value) +#define THPLongUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPLongUtils_unpackReal(object) \ + (int64_t) THPUtils_unpackReal_INT(object) +#define THPLongUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPIntUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPIntUtils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPIntUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPShortUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPShortUtils_unpackReal(object) (short)THPUtils_unpackReal_INT(object) +#define THPShortUtils_newReal(value) THPUtils_newReal_INT(value) +#define THPCharUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPCharUtils_unpackReal(object) (char)THPUtils_unpackReal_INT(object) +#define THPCharUtils_newReal(value) THPUtils_newReal_INT(value) +#define 
THPByteUtils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPByteUtils_unpackReal(object) \ + (unsigned char)THPUtils_unpackReal_INT(object) +#define THPByteUtils_newReal(value) THPUtils_newReal_INT(value) +// quantized types +#define THPQUInt8Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt8Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQInt8Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQInt8Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQInt8Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQInt32Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQInt32Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQInt32Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQUInt4x2Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt4x2Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt4x2Utils_newReal(value) THPUtils_newReal_INT(value) +#define THPQUInt2x4Utils_checkReal(object) THPUtils_checkReal_INT(object) +#define THPQUInt2x4Utils_unpackReal(object) (int)THPUtils_unpackReal_INT(object) +#define THPQUInt2x4Utils_newReal(value) THPUtils_newReal_INT(value) + +/* + From https://github.com/python/cpython/blob/v3.7.0/Modules/xxsubtype.c + If compiled as a shared library, some compilers don't allow addresses of + Python objects defined in other libraries to be used in static PyTypeObject + initializers. The DEFERRED_ADDRESS macro is used to tag the slots where such + addresses appear; the module init function that adds the PyTypeObject to the + module must fill in the tagged slots at runtime. The argument is for + documentation -- the macro ignores it. 
+*/ +#define DEFERRED_ADDRESS(ADDR) nullptr + +TORCH_PYTHON_API void THPUtils_setError(const char* format, ...); +TORCH_PYTHON_API void THPUtils_invalidArguments( + PyObject* given_args, + PyObject* given_kwargs, + const char* function_name, + size_t num_options, + ...); + +bool THPUtils_checkIntTuple(PyObject* arg); +std::vector THPUtils_unpackIntTuple(PyObject* arg); + +TORCH_PYTHON_API void THPUtils_addPyMethodDefs( + std::vector& vector, + PyMethodDef* methods); + +int THPUtils_getCallable(PyObject* arg, PyObject** result); + +typedef THPPointer THPGeneratorPtr; +typedef class THPPointer THPStoragePtr; + +TORCH_PYTHON_API std::vector THPUtils_unpackLongs(PyObject* arg); +PyObject* THPUtils_dispatchStateless( + PyObject* tensor, + const char* name, + PyObject* args, + PyObject* kwargs); + +template +struct mod_traits {}; + +template +struct mod_traits<_real, std::enable_if_t>> { + static _real mod(_real a, _real b) { + return fmod(a, b); + } +}; + +template +struct mod_traits<_real, std::enable_if_t>> { + static _real mod(_real a, _real b) { + return a % b; + } +}; + +void setBackCompatBroadcastWarn(bool warn); +bool getBackCompatBroadcastWarn(); + +void setBackCompatKeepdimWarn(bool warn); +bool getBackCompatKeepdimWarn(); +bool maybeThrowBackCompatKeepdimWarn(char* func); + +// NB: This is in torch/csrc/cuda/utils.cpp, for whatever reason +#ifdef USE_CUDA +std::vector> +THPUtils_PySequence_to_CUDAStreamList(PyObject* obj); +#endif + +void storage_fill(const at::Storage& self, uint8_t value); +void storage_set(const at::Storage& self, ptrdiff_t idx, uint8_t value); +uint8_t storage_get(const at::Storage& self, ptrdiff_t idx); + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/custom_class.h b/venv/lib/python3.10/site-packages/torch/include/torch/custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..a556ae6a81e572f1db0efed8dd0640afb40cea82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/custom_class.h @@ -0,0 +1,515 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { + +/// This function is used in conjunction with `class_::def()` to register +/// a constructor for a given C++ class type. For example, +/// `torch::init()` would register a two-argument constructor +/// taking an `int` and a `std::string` as argument. +template +detail::types init() { + return detail::types{}; +} + +template +struct InitLambda { + Func f; +}; + +template +decltype(auto) init(Func&& f) { + using InitTraits = c10::guts::infer_function_traits_t>; + using ParameterTypeList = typename InitTraits::parameter_types; + + InitLambda init{std::forward(f)}; + return init; +} + +/// Entry point for custom C++ class registration. To register a C++ class +/// in PyTorch, instantiate `torch::class_` with the desired class as the +/// template parameter. Typically, this instantiation should be done in +/// the initialization of a global variable, so that the class will be +/// made available on dynamic library loading without any additional API +/// calls needed. 
For example, to register a class named Foo, you might +/// create a global variable like so: +/// +/// static auto register_foo = torch::class_("myclasses", "Foo") +/// .def("myMethod", &Foo::myMethod) +/// .def("lambdaMethod", [](const c10::intrusive_ptr& self) { +/// // Do something with `self` +/// }); +/// +/// In addition to registering the class, this registration also chains +/// `def()` calls to register methods. `myMethod()` is registered with +/// a pointer to the Foo class's `myMethod()` method. `lambdaMethod()` +/// is registered with a C++ lambda expression. +template +class class_ : public ::torch::detail::class_base { + static_assert( + std::is_base_of::value, + "torch::class_ requires T to inherit from CustomClassHolder"); + + public: + /// This constructor actually registers the class type. + /// String argument `namespaceName` is an identifier for the + /// namespace you would like this class to appear in. + /// String argument `className` is the name you would like to + /// see this class exposed as in Python and TorchScript. For example, if + /// you pass `foo` as the namespace name and `Bar` as the className, the + /// class will appear as `torch.classes.foo.Bar` in Python and TorchScript + explicit class_( + const std::string& namespaceName, + const std::string& className, + std::string doc_string = "") + : class_base( + namespaceName, + className, + std::move(doc_string), + typeid(c10::intrusive_ptr), + typeid(c10::tagged_capsule)) {} + + /// def() can be used in conjunction with `torch::init()` to register + /// a constructor for a given C++ class type. For example, passing + /// `torch::init()` would register a two-argument + /// constructor taking an `int` and a `std::string` as argument. + template + class_& def( + torch::detail::types, + std::string doc_string = "", + std::initializer_list default_args = + {}) { // Used in combination with + // torch::init<...>() + auto func = [](c10::tagged_capsule self, Types... args) { + auto classObj = c10::make_intrusive(args...); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(std::move(classObj))); + }; + + defineMethod( + "__init__", + std::move(func), + std::move(doc_string), + default_args); + return *this; + } + + // Used in combination with torch::init([]lambda(){......}) + template + class_& def( + InitLambda> init, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto init_lambda_wrapper = [func = std::move(init.f)]( + c10::tagged_capsule self, + ParameterTypes... arg) { + c10::intrusive_ptr classObj = + at::guts::invoke(func, std::forward(arg)...); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(classObj)); + }; + + defineMethod( + "__init__", + std::move(init_lambda_wrapper), + std::move(doc_string), + default_args); + + return *this; + } + + /// This is the normal method registration API. `name` is the name that + /// the method will be made accessible by in Python and TorchScript. + /// `f` is a callable object that defines the method. Typically `f` + /// will either be a pointer to a method on `CurClass`, or a lambda + /// expression that takes a `c10::intrusive_ptr` as the first + /// argument (emulating a `this` argument in a C++ method.) 
+ /// + /// Examples: + /// + /// // Exposes method `foo` on C++ class `Foo` as `call_foo()` in + /// // Python and TorchScript + /// .def("call_foo", &Foo::foo) + /// + /// // Exposes the given lambda expression as method `call_lambda()` + /// // in Python and TorchScript. + /// .def("call_lambda", [](const c10::intrusive_ptr& self) { + /// // do something + /// }) + template + class_& def( + std::string name, + Func f, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto wrapped_f = detail::wrap_func(std::move(f)); + defineMethod( + std::move(name), + std::move(wrapped_f), + std::move(doc_string), + default_args); + return *this; + } + + /// Method registration API for static methods. + template + class_& def_static(std::string name, Func func, std::string doc_string = "") { + auto qualMethodName = qualClassName + "." + name; + auto schema = + c10::inferFunctionSchemaSingleReturn(std::move(name), ""); + + auto wrapped_func = + [func = std::move(func)](jit::Stack& stack) mutable -> void { + using RetType = + typename c10::guts::infer_function_traits_t::return_type; + detail::BoxedProxy()(stack, func); + }; + auto method = std::make_unique( + std::move(qualMethodName), + std::move(schema), + std::move(wrapped_func), + std::move(doc_string)); + + classTypePtr->addStaticMethod(method.get()); + registerCustomClassMethod(std::move(method)); + return *this; + } + + /// Property registration API for properties with both getter and setter + /// functions. + template + class_& def_property( + const std::string& name, + GetterFunc getter_func, + SetterFunc setter_func, + std::string doc_string = "") { + torch::jit::Function* getter{}; + torch::jit::Function* setter{}; + + auto wrapped_getter = + detail::wrap_func(std::move(getter_func)); + getter = defineMethod(name + "_getter", wrapped_getter, doc_string); + + auto wrapped_setter = + detail::wrap_func(std::move(setter_func)); + setter = defineMethod(name + "_setter", wrapped_setter, doc_string); + + classTypePtr->addProperty(name, getter, setter); + return *this; + } + + /// Property registration API for properties with only getter function. + template + class_& def_property( + const std::string& name, + GetterFunc getter_func, + std::string doc_string = "") { + torch::jit::Function* getter{}; + + auto wrapped_getter = + detail::wrap_func(std::move(getter_func)); + getter = defineMethod(name + "_getter", wrapped_getter, doc_string); + + classTypePtr->addProperty(name, getter, nullptr); + return *this; + } + + /// Property registration API for properties with read-write access. + template + class_& def_readwrite(const std::string& name, T CurClass::*field) { + auto getter_func = [field = + field](const c10::intrusive_ptr& self) { + return self.get()->*field; + }; + + auto setter_func = [field = field]( + const c10::intrusive_ptr& self, T value) { + self.get()->*field = value; + }; + + return def_property(name, getter_func, setter_func); + } + + /// Property registration API for properties with read-only access. + template + class_& def_readonly(const std::string& name, T CurClass::*field) { + auto getter_func = + [field = std::move(field)](const c10::intrusive_ptr& self) { + return self.get()->*field; + }; + + return def_property(name, getter_func); + } + + /// This is an unsafe method registration API added for adding custom JIT + /// backend support via custom C++ classes. It is not for general purpose use. 
+ class_& _def_unboxed( + const std::string& name, + std::function func, + c10::FunctionSchema schema, + std::string doc_string = "") { + auto method = std::make_unique( + qualClassName + "." + name, + std::move(schema), + std::move(func), + std::move(doc_string)); + classTypePtr->addMethod(method.get()); + registerCustomClassMethod(std::move(method)); + return *this; + } + + /// def_pickle() is used to define exactly what state gets serialized + /// or deserialized for a given instance of a custom C++ class in + /// Python or TorchScript. This protocol is equivalent to the Pickle + /// concept of `__getstate__` and `__setstate__` from Python + /// (https://docs.python.org/2/library/pickle.html#object.__getstate__) + /// + /// Currently, both the `get_state` and `set_state` callables must be + /// C++ lambda expressions. They should have the following signatures, + /// where `CurClass` is the class you're registering and `T1` is some object + /// that encapsulates the state of the object. + /// + /// __getstate__(intrusive_ptr) -> T1 + /// __setstate__(T2) -> intrusive_ptr + /// + /// `T1` must be an object that is convertable to IValue by the same rules + /// for custom op/method registration. + /// + /// For the common case, T1 == T2. T1 can also be a subtype of T2. An + /// example where it makes sense for T1 and T2 to differ is if __setstate__ + /// handles legacy formats in a backwards compatible way. + /// + /// Example: + /// + /// .def_pickle( + /// // __getstate__ + /// [](const c10::intrusive_ptr>& self) { + /// return self->stack_; + /// }, + /// [](std::vector state) { // __setstate__ + /// return c10::make_intrusive>( + /// std::vector{"i", "was", "deserialized"}); + /// }) + template + class_& def_pickle(GetStateFn&& get_state, SetStateFn&& set_state) { + static_assert( + c10::guts::is_stateless_lambda>::value && + c10::guts::is_stateless_lambda>::value, + "def_pickle() currently only supports lambdas as " + "__getstate__ and __setstate__ arguments."); + def("__getstate__", std::forward(get_state)); + + // __setstate__ needs to be registered with some custom handling: + // We need to wrap the invocation of the user-provided function + // such that we take the return value (i.e. c10::intrusive_ptr) + // and assign it to the `capsule` attribute. + using SetStateTraits = + c10::guts::infer_function_traits_t>; + using SetStateArg = typename c10::guts::typelist::head_t< + typename SetStateTraits::parameter_types>; + auto setstate_wrapper = [set_state = std::forward(set_state)]( + c10::tagged_capsule self, + SetStateArg&& arg) { + c10::intrusive_ptr classObj = + at::guts::invoke(set_state, std::forward(arg)); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(classObj)); + }; + defineMethod( + "__setstate__", + detail::wrap_func( + std::move(setstate_wrapper))); + + // type validation + auto getstate_schema = classTypePtr->getMethod("__getstate__").getSchema(); + auto format_getstate_schema = [&getstate_schema]() { + std::stringstream ss; + ss << getstate_schema; + return ss.str(); + }; + TORCH_CHECK( + getstate_schema.arguments().size() == 1, + "__getstate__ should take exactly one argument: self. Got: ", + format_getstate_schema()); + auto first_arg_type = getstate_schema.arguments().at(0).type(); + TORCH_CHECK( + *first_arg_type == *classTypePtr, + "self argument of __getstate__ must be the custom class type. 
Got ", + first_arg_type->repr_str()); + TORCH_CHECK( + getstate_schema.returns().size() == 1, + "__getstate__ should return exactly one value for serialization. Got: ", + format_getstate_schema()); + + auto ser_type = getstate_schema.returns().at(0).type(); + auto setstate_schema = classTypePtr->getMethod("__setstate__").getSchema(); + auto arg_type = setstate_schema.arguments().at(1).type(); + TORCH_CHECK( + ser_type->isSubtypeOf(*arg_type), + "__getstate__'s return type should be a subtype of " + "input argument of __setstate__. Got ", + ser_type->repr_str(), + " but expected ", + arg_type->repr_str()); + + return *this; + } + + private: + template + torch::jit::Function* defineMethod( + std::string name, + Func func, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto qualMethodName = qualClassName + "." + name; + auto schema = + c10::inferFunctionSchemaSingleReturn(std::move(name), ""); + + // If default values are provided for function arguments, there must be + // none (no default values) or default values for all function + // arguments, except for self. This is because argument names are not + // extracted by inferFunctionSchemaSingleReturn, and so there must be a + // torch::arg instance in default_args even for arguments that do not + // have an actual default value provided. + TORCH_CHECK( + default_args.size() == 0 || + default_args.size() == schema.arguments().size() - 1, + "Default values must be specified for none or all arguments"); + + // If there are default args, copy the argument names and default values to + // the function schema. + if (default_args.size() > 0) { + schema = withNewArguments(schema, default_args); + } + + auto wrapped_func = + [func = std::move(func)](jit::Stack& stack) mutable -> void { + // TODO: we need to figure out how to profile calls to custom functions + // like this! Currently can't do it because the profiler stuff is in + // libtorch and not ATen + using RetType = + typename c10::guts::infer_function_traits_t::return_type; + detail::BoxedProxy()(stack, func); + }; + auto method = std::make_unique( + qualMethodName, + std::move(schema), + std::move(wrapped_func), + std::move(doc_string)); + + // Register the method here to keep the Method alive. + // ClassTypes do not hold ownership of their methods (normally it + // those are held by the CompilationUnit), so we need a proxy for + // that behavior here. + auto method_val = method.get(); + classTypePtr->addMethod(method_val); + registerCustomClassMethod(std::move(method)); + return method_val; + } +}; + +/// make_custom_class() is a convenient way to create an instance of a +/// registered custom class and wrap it in an IValue, for example when you want +/// to pass the object to TorchScript. Its syntax is equivalent to APIs like +/// `std::make_shared<>` or `c10::make_intrusive<>`. +/// +/// For example, if you have a custom C++ class that can be constructed from an +/// `int` and `std::string`, you might use this API like so: +/// +/// IValue custom_class_iv = torch::make_custom_class(3, +/// "foobarbaz"); +template +c10::IValue make_custom_class(CtorArgs&&... args) { + auto userClassInstance = + c10::make_intrusive(std::forward(args)...); + return c10::IValue(std::move(userClassInstance)); +} + +// Alternative api for creating a torchbind class over torch::class_ this api is +// preffered to prevent size regressions on Edge usecases. 
Must be used in +// conjunction with TORCH_SELECTIVE_CLASS macro aka +// selective_class("foo_namespace", TORCH_SELECTIVE_CLASS("foo")) +template +inline class_ selective_class_( + const std::string& namespace_name, + detail::SelectiveStr className) { + auto class_name = std::string(className.operator const char*()); + return torch::class_(namespace_name, class_name); +} + +template +inline detail::ClassNotSelected selective_class_( + const std::string&, + detail::SelectiveStr) { + return detail::ClassNotSelected(); +} + +// jit namespace for backward-compatibility +// We previously defined everything in torch::jit but moved it out to +// better reflect that these features are not limited only to TorchScript +namespace jit { + +using ::torch::class_; +using ::torch::getCustomClass; +using ::torch::init; +using ::torch::isCustomClass; + +} // namespace jit + +template +inline class_ Library::class_(const std::string& className) { + TORCH_CHECK( + kind_ == DEF || kind_ == FRAGMENT, + "class_(\"", + className, + "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. " + "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. " + "(Error occurred at ", + file_, + ":", + line_, + ")"); + TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_); + return torch::class_(*ns_, className); +} + +const std::unordered_set getAllCustomClassesNames(); + +template +inline class_ Library::class_(detail::SelectiveStr className) { + auto class_name = std::string(className.operator const char*()); + TORCH_CHECK( + kind_ == DEF || kind_ == FRAGMENT, + "class_(\"", + class_name, + "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. " + "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. " + "(Error occurred at ", + file_, + ":", + line_, + ")"); + TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_); + return torch::class_(*ns_, class_name); +} + +template +inline detail::ClassNotSelected Library::class_(detail::SelectiveStr) { + return detail::ClassNotSelected(); +} + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h b/venv/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h new file mode 100644 index 0000000000000000000000000000000000000000..736d5aacdaa3226e7a247383333823870f978405 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h @@ -0,0 +1,239 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { + +namespace detail { +/** + * In the Facebook internal build (using BUCK), this macro is enabled by + * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer + * binary. + */ +#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE +TORCH_API void record_custom_class(std::string name); + +/** + * Record an instance of a custom class being loaded + * grab portion of string after final '.' from qualified name + * as this seemingly aligns with how users name their custom classes + * example: __torch__.torch.classes.xnnpack.Conv2dOpContext + */ +#define RECORD_CUSTOM_CLASS(NAME) \ + auto name = std::string(NAME); \ + detail::record_custom_class(name.substr(name.find_last_of(".") + 1)); +#else +#define RECORD_CUSTOM_CLASS(NAME) +#endif +} // namespace detail + +/// This struct is used to represent default values for arguments +/// when registering methods for custom classes. 
+/// static auto register_foo = torch::class_("myclasses", "Foo") +/// .def("myMethod", &Foo::myMethod, {torch::arg("name") = name}); +struct arg { + // Static method for representing a default value of None. This is meant to + // be used like so: + // torch::arg("name") = torch::arg::none + // and is identical to: + // torch::arg("name") = IValue() + static c10::IValue none() { + return c10::IValue(); + } + + // Explicit constructor. + explicit arg(std::string name) + : name_(std::move(name)), value_(c10::nullopt) {} + // Assignment operator. This enables the pybind-like syntax of + // torch::arg("name") = value. + arg& operator=(const c10::IValue& rhs) { + value_ = rhs; + return *this; + } + + // The name of the argument. This is copied to the schema; argument + // names cannot be extracted from the C++ declaration. + std::string name_; + // IValue's default constructor makes it None, which is not distinguishable + // from an actual, user-provided default value that is None. This boolean + // helps distinguish between the two cases. + c10::optional value_; +}; + +namespace detail { + +// Argument type utilities +template +struct types { + using type = types; +}; + +template +struct WrapMethod; + +template +struct WrapMethod { + WrapMethod(R (CurrClass::*m)(Args...)) : m(std::move(m)) {} + + R operator()(c10::intrusive_ptr cur, Args... args) { + return c10::guts::invoke(m, *cur, args...); + } + + R (CurrClass::*m)(Args...); +}; + +template +struct WrapMethod { + WrapMethod(R (CurrClass::*m)(Args...) const) : m(std::move(m)) {} + + R operator()(c10::intrusive_ptr cur, Args... args) { + return c10::guts::invoke(m, *cur, args...); + } + + R (CurrClass::*m)(Args...) const; +}; + +// Adapter for different callable types +template < + typename CurClass, + typename Func, + std::enable_if_t< + std::is_member_function_pointer>::value, + bool> = false> +WrapMethod wrap_func(Func f) { + return WrapMethod(std::move(f)); +} + +template < + typename CurClass, + typename Func, + std::enable_if_t< + !std::is_member_function_pointer>::value, + bool> = false> +Func wrap_func(Func f) { + return f; +} + +template < + class Functor, + bool AllowDeprecatedTypes, + size_t... ivalue_arg_indices> +typename c10::guts::infer_function_traits_t::return_type +call_torchbind_method_from_stack( + Functor& functor, + jit::Stack& stack, + std::index_sequence) { + (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would + // be unused and we have to silence the compiler warning. + + constexpr size_t num_ivalue_args = sizeof...(ivalue_arg_indices); + + using IValueArgTypes = + typename c10::guts::infer_function_traits_t::parameter_types; + // TODO We shouldn't use c10::impl stuff directly here. We should use the + // KernelFunction API instead. 
+ return (functor)(c10::impl::ivalue_to_arg< + typename c10::impl::decay_if_not_tensor< + c10::guts::typelist:: + element_t>::type, + AllowDeprecatedTypes>:: + call(torch::jit::peek( + stack, ivalue_arg_indices, num_ivalue_args))...); +} + +template +typename c10::guts::infer_function_traits_t::return_type +call_torchbind_method_from_stack(Functor& functor, jit::Stack& stack) { + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + return call_torchbind_method_from_stack( + functor, stack, std::make_index_sequence()); +} + +template +struct BoxedProxy; + +template +struct BoxedProxy { + void operator()(jit::Stack& stack, Func& func) { + auto retval = call_torchbind_method_from_stack(func, stack); + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + torch::jit::drop(stack, num_ivalue_args); + stack.emplace_back(c10::ivalue::from(std::move(retval))); + } +}; + +template +struct BoxedProxy { + void operator()(jit::Stack& stack, Func& func) { + call_torchbind_method_from_stack(func, stack); + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + torch::jit::drop(stack, num_ivalue_args); + stack.emplace_back(); + } +}; + +inline bool validIdent(size_t i, char n) { + return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); +} + +inline void checkValidIdent(const std::string& str, const char* type) { + for (const auto i : c10::irange(str.size())) { + TORCH_CHECK( + validIdent(i, str[i]), + type, + " must be a valid Python/C++ identifier." + " Character '", + str[i], + "' at index ", + i, + " is illegal."); + } +} + +class TORCH_API class_base { + protected: + explicit class_base( + const std::string& namespaceName, + const std::string& className, + std::string doc_string, + const std::type_info& intrusivePtrClassTypeid, + const std::type_info& taggedCapsuleClass); + + static c10::FunctionSchema withNewArguments( + const c10::FunctionSchema& schema, + std::initializer_list default_args); + std::string qualClassName; + at::ClassTypePtr classTypePtr; +}; + +} // namespace detail + +TORCH_API void registerCustomClass(at::ClassTypePtr class_type); +TORCH_API void registerCustomClassMethod(std::unique_ptr method); + +// Given a qualified name (e.g. __torch__.torch.classes.Foo), return +// the ClassType pointer to the Type that describes that custom class, +// or nullptr if no class by that name was found. +TORCH_API at::ClassTypePtr getCustomClass(const std::string& name); + +// Given an IValue, return true if the object contained in that IValue +// is a custom C++ class, otherwise return false. +TORCH_API bool isCustomClass(const c10::IValue& v); + +// This API is for testing purposes ONLY. It should not be used in +// any load-bearing code. +TORCH_API std::vector customClassSchemasForBCCheck(); + +namespace jit { +using ::torch::registerCustomClass; +using ::torch::registerCustomClassMethod; +} // namespace jit + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/extension.h b/venv/lib/python3.10/site-packages/torch/include/torch/extension.h new file mode 100644 index 0000000000000000000000000000000000000000..671ae1aadb8d5f2ad33cfe27a8fe1481856e668b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/extension.h @@ -0,0 +1,9 @@ +#pragma once + +#ifndef TORCH_INDUCTOR_CPP_WRAPPER +// All pure C++ headers for the C++ frontend. +#include +#endif + +// Python bindings for the C++ frontend (includes Python.h). 
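// --------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header above): querying the
// custom-class registry via the getCustomClass()/isCustomClass() helpers
// declared above. The qualified name assumes a (hypothetical) class registered
// under my_namespace.MyStack.
// --------------------------------------------------------------------------
#include <torch/custom_class.h>

bool registry_sanity_check(const c10::IValue& v) {
  // Returns the registered ClassType, or nullptr if no class has that name.
  at::ClassTypePtr stack_type =
      torch::getCustomClass("__torch__.torch.classes.my_namespace.MyStack");
  // isCustomClass() reports whether the IValue holds some custom C++ class.
  return stack_type != nullptr && torch::isCustomClass(v);
}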
+#include diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/library.h b/venv/lib/python3.10/site-packages/torch/include/torch/library.h new file mode 100644 index 0000000000000000000000000000000000000000..793c87544233acccb2e08f1626d892896de3b5ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/library.h @@ -0,0 +1,1050 @@ +#pragma once + +/// \file +/// +/// This header provides an API for extending PyTorch's core library +/// of operators with user defined operators and data types. This +/// API can be used in a few ways: +/// +/// * You can define new custom operators and classes with TORCH_LIBRARY(), +/// making them available for use in both eager Python as well as in +/// TorchScript. This API is modeled off of pybind11's `PYBIND11_MODULE` +/// macro, as the provided functionality is similar (pybind11 lets you bind +/// C++ to Python only; `torch/library.h` lets you bind C++ simultaneously to +/// Python and TorchScript). +/// +/// * You can override existing operators with TORCH_LIBRARY_IMPL(), +/// providing a new implementation for these operators for a custom +/// backend (e.g., XLA). When you pass operators with tensors of your custom +/// backend, your overridden implementations will be called instead +/// of the standard implementations. +/// +/// * You can use both capabilities at the same time, allowing you +/// to write custom operators that register CPU/CUDA/Autograd +/// implementations without having to write the boilerplate +/// conditionals yourself. +/// +/// For a tutorial style introduction to the library API, check +/// out the [Extending TorchScript with Custom C++ +/// Operators](https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html) +/// tutorial. +/// +/// ``` +/// // Define a library whose operators live in the namespace 'myops'. +/// // You must define all of the operators for this library in +/// // this namespace. +/// TORCH_LIBRARY(myops, m) { +/// // Define a operator with exactly one implementation for all backends. +/// m.def("add(Tensor self, Tensor other) -> Tensor", &add_impl); +/// +/// // Define a schema for an operator, but provide no implementation +/// // (use this syntax if you want to use the dispatcher) +/// m.def("mul(Tensor self, Tensor other) -> Tensor"); +/// +/// // Provide an implementation for a defined operator (you can +/// // provide multiple; one per backend). The dispatcher takes care of +/// // calling the correct implementation depending on if we get a CPU +/// // tensor or a CUDA tensor +/// m.impl("mul", torch::kCPU, &mul_cpu_impl); +/// m.impl("mul", torch::kCUDA, &mul_cuda_impl); +/// } +/// +/// // Define implementations for operators for a non-standard backend, +/// // e.g., XLA (valid values are entries of DispatchKey). This can +/// // be used to define operators in a different file than the initial +/// // TORCH_LIBRARY definition (e.g., if it is in an external library) +/// TORCH_LIBRARY_IMPL(myops, XLA, m) { +/// m.impl("mul", &mul_xla_impl); +/// } +/// ``` + +#include +#include +#include +#include +#include + +// Just for inferFunctionSchemaFromFunctor +#include +#include + +namespace torch { + +#if defined C10_MOBILE +/** + * The NoInferSchemaTag is a type name used to indicate that this call to the + * CppFunction constructor should not trigger schema inference from functor. + * Schema inference from functor utilizes template meta-programming, and is + * costly from a size perspective. 
Ideally, one would expect that the schema + * inference would require very little binary size since most of the + * computation can be done by the compiler at build time, but that isn't + * necessarily the case. + * + * Schema inference is elided only for mobile use-cases where we don't need + * the additional runtime cost or size overhead on client devices. + * + */ +struct NoInferSchemaTag {}; +#endif + +#define HAS_PT2_COMPLIANT_TAG + +// For multipy/torchdeploy use case +enum class _RegisterOrVerify { REGISTER, VERIFY }; + +template +class class_; + +#define HAS_IMPL_ABSTRACT_PYSTUB + +/// Represents a C++ function that implements an operator. Most users won't +/// interact directly with this class, except via error messages: the +/// constructors this function define the set of permissible "function"-like +/// things you can bind via the interface. +/// +/// This class erases the type of the passed in function, but durably records +/// the type via an inferred schema for the function. +class TORCH_API CppFunction final { + // TODO: This is morally the same thing as KernelRegistrationConfig, but it's + // opaque to the user. + + public: + /// This overload accepts function pointers, e.g., `CppFunction(&add_impl)` + template + explicit CppFunction( + Func* f, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)), + cpp_signature_(c10::impl::CppSignature::make()), + schema_( + c10::detail::inferFunctionSchemaFromFunctor>()), + debug_() {} + + /// This overload accepts compile time function pointers, e.g., + /// `CppFunction(TORCH_FN(add_impl))` + template + explicit CppFunction( + FuncPtr f, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedFunction(f)), + cpp_signature_( + c10::impl::CppSignature::make()), + schema_(c10::detail::inferFunctionSchemaFromFunctor< + typename FuncPtr::FuncType>()), + debug_() {} + + /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) { + /// ... 
})` + template + explicit CppFunction( + Lambda&& f, + std::enable_if_t< + c10::guts::is_functor>::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedLambda( + std::forward(f))), + cpp_signature_(c10::impl::CppSignature::make()), + schema_(c10::detail::inferFunctionSchemaFromFunctor< + std::decay_t>()), + debug_() {} + +#if defined C10_MOBILE + /// This overload accepts function pointers, e.g., `CppFunction(&add_impl, + /// NoInferSchemaTag())` + template + explicit CppFunction( + Func* f, + NoInferSchemaTag, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)), + cpp_signature_(c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} + + /// This overload accepts compile time function pointers, e.g., + /// `CppFunction(TORCH_FN(add_impl), NoInferSchemaTag())` + template + explicit CppFunction( + FuncPtr f, + NoInferSchemaTag, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedFunction(f)), + cpp_signature_( + c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} + + /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) { + /// ... }. NoInferSchemaTag())` + template + explicit CppFunction( + Lambda&& f, + NoInferSchemaTag, + std::enable_if_t< + c10::guts::is_functor>::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedLambda( + std::forward(f))), + cpp_signature_(c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} +#endif + + ~CppFunction(); + + CppFunction(CppFunction&&) noexcept = default; + + CppFunction& operator=(CppFunction&&) = default; + + /// \private + /// Creates a function from a type-erased boxed kernel. + static CppFunction makeFromBoxedKernel(c10::BoxedKernel kernel) { + return CppFunction( + c10::KernelFunction::makeFromBoxedKernel(std::move(kernel)), + /* cpp_signature */ c10::nullopt, // not known for boxed functions + /* schema */ nullptr); + } + + /// This creates a fallthrough function. Fallthrough functions + /// immediately redispatch to the next available dispatch key, + /// but are implemented more efficiently than a hand written + /// function done in the same way. + static CppFunction makeFallthrough() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFallthrough()); + } + + /// \private + /// + /// Creates a function that raises an error saying that named tensors + /// are not supported when called. + static CppFunction makeNamedNotSupported() { + return makeFromBoxedKernel(c10::BoxedKernel::makeNamedNotSupported()); + } + + /// Create a function from a boxed kernel function with signature + /// `void(const OperatorHandle&, Stack*)`; i.e., they receive a + /// stack of arguments in a boxed calling convention, rather than + /// in the native C++ calling convention. Boxed functions are + /// typically only used to register backend fallbacks via + /// torch::Library::fallback(). + template + static CppFunction makeFromBoxedFunction() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction()); + } + + // Variant that takes in a boxed kernel function with a plumbed + // DispatchKeySet. See Note [Plumbing Keys Through The Dispatcher] for + // details. 
+ template + static CppFunction makeFromBoxedFunction() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction()); + } + + /// Create a function from a boxed kernel functor which defines + /// `operator()(const OperatorHandle&, DispatchKeySet, Stack*)` + /// (receiving arguments from boxed calling convention) and inherits + /// from `c10::OperatorKernel`. Unlike makeFromBoxedFunction, functions + /// registered in this way can also carry additional state which + /// is managed by the functor; this is useful if you're writing an + /// adapter to some other implementation, e.g., a Python callable, which + /// is dynamically associated with the registered kernel. + template + static CppFunction makeFromBoxedFunctor( + std::unique_ptr kernelFunctor) { + return makeFromBoxedKernel( + c10::BoxedKernel::makeFromFunctor(std::move(kernelFunctor))); + } + + /// Create a function from an unboxed kernel function. + /// This is typically used to register common operators. + template < + typename FuncPtr, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr> + static CppFunction makeFromUnboxedFunction(FuncPtr* f) { + return CppFunction(f); + } + + /// Create a function from a compile time unboxed kernel function pointer. + /// This is typically used to register common operators. + /// Compile time function pointers can be used to allow the compiler + /// to optimize (e.g. inline) calls to it. + template < + typename FuncPtr, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr> + static CppFunction makeFromUnboxedFunction(FuncPtr f) { + return CppFunction(f); + } + + CppFunction&& debug(std::string d) && { + debug_ = std::move(d); + return std::move(*this); + } + + private: + c10::optional dispatch_key_; + c10::KernelFunction func_; + c10::optional cpp_signature_; + std::unique_ptr schema_; + std::string debug_; + + // The "setter" for dispatch_key_ + template + friend CppFunction dispatch(c10::DispatchKey, Func&&); + + // The only class which actually pulls out values from CppFunction (does so + // destructively, felt too lazy to write accessors that I don't even + // want users to use) + friend class Library; + + CppFunction( + c10::KernelFunction func, + c10::optional cpp_signature, + std::unique_ptr schema); +}; + +/// \defgroup torch-dispatch-overloads torch::dispatch overloads + +/// Create a torch::CppFunction which is associated with a specific +/// dispatch key. torch::CppFunctions that are tagged with a +/// c10::DispatchKey don't get invoked unless the dispatcher determines +/// that this particular c10::DispatchKey is the one that should be +/// dispatched to. +/// +/// This function is generally not used directly, instead, prefer using +/// TORCH_LIBRARY_IMPL(), which will implicitly set the c10::DispatchKey +/// for all registration calls inside of its body. 
+/// +/// \ingroup torch-dispatch-overloads +template +inline CppFunction dispatch(c10::DispatchKey k, Func&& raw_f) { + CppFunction f(std::forward(raw_f)); + if (k == c10::DispatchKey::CatchAll) { + f.dispatch_key_ = c10::nullopt; + } else { + f.dispatch_key_ = k; + } + return f; +} + +/// Convenience overload of dispatch() which accepts c10::DeviceType +/// +/// \ingroup torch-dispatch-overloads +template +inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) { + auto deviceTypeToDispatchKey = [](c10::DeviceType t) { + switch (t) { + // This list is synchronized with the k-constants in c10/core/DeviceType.h + case c10::DeviceType::CPU: + return c10::DispatchKey::CPU; + case c10::DeviceType::CUDA: + return c10::DispatchKey::CUDA; + case c10::DeviceType::IPU: + return c10::DispatchKey::IPU; + case c10::DeviceType::XLA: + return c10::DispatchKey::XLA; + case c10::DeviceType::Lazy: + return c10::DispatchKey::Lazy; + case c10::DeviceType::XPU: + return c10::DispatchKey::XPU; + case c10::DeviceType::MPS: + return c10::DispatchKey::MPS; + case c10::DeviceType::Meta: + return c10::DispatchKey::Meta; + case c10::DeviceType::HIP: + return c10::DispatchKey::HIP; + case c10::DeviceType::ORT: + return c10::DispatchKey::ORT; + case c10::DeviceType::HPU: + return c10::DispatchKey::HPU; + case c10::DeviceType::MTIA: + return c10::DispatchKey::MTIA; + case c10::DeviceType::PrivateUse1: + return c10::DispatchKey::PrivateUse1; + default: + TORCH_CHECK( + false, + "Device type ", + t, + " cannot be overloaded at dispatch time, " + "please file a bug report explaining what you were trying to do."); + } + }; + return dispatch(deviceTypeToDispatchKey(type), std::forward(raw_f)); +} + +/// \defgroup torch-schema-overloads torch::schema overloads + +/// Construct a c10::FunctionSchema from a string, with an explicitly +/// specified c10::AliasAnalysisKind. Ordinarily, schemas are simply +/// passed in as strings, but if you need to specify a custom alias +/// analysis, you can replace the string with a call to this function. +/// +/// ``` +/// // Default alias analysis (FROM_SCHEMA) +/// m.def("def3(Tensor self) -> Tensor"); +/// // Pure function alias analysis +/// m.def(torch::schema("def3(Tensor self) -> Tensor", +/// c10::AliasAnalysisKind::PURE_FUNCTION)); +/// ``` +/// +/// \ingroup torch-schema-overloads +inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) { + c10::FunctionSchema s = torch::jit::parseSchema(str); + s.setAliasAnalysis(k); + return s; +} + +/// Function schemas can be directly constructed from string literals. +/// +/// \ingroup torch-schema-overloads +inline c10::FunctionSchema schema(const char* s) { + return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA); +} + +/// \private +/// +/// Already constructed function schemas are accepted if they are +/// rvalues. 
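// --------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header above): torch::schema()
// with an explicit alias-analysis kind and torch::dispatch() with a device
// type, as documented above. The "mysketch" namespace, the operator, and the
// kernel are hypothetical and independent of the other sketches.
// --------------------------------------------------------------------------
#include <torch/library.h>
#include <ATen/ATen.h>

at::Tensor my_relu_cpu(const at::Tensor& self) {
  return self.clamp_min(0);  // stand-in CPU kernel
}

TORCH_LIBRARY(mysketch, m) {
  // Declare the operator with an explicit alias-analysis kind instead of
  // relying on the default FROM_SCHEMA behavior.
  m.def(torch::schema("my_relu(Tensor self) -> Tensor",
                      c10::AliasAnalysisKind::PURE_FUNCTION));
  // Attach a dispatch key to this one kernel; TORCH_LIBRARY_IMPL() is the
  // preferred way to set the key for a whole block of registrations.
  m.impl("my_relu", torch::dispatch(c10::DeviceType::CPU, &my_relu_cpu));
}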
+/// +/// \ingroup torch-schema-overloads +inline c10::FunctionSchema&& schema(c10::FunctionSchema&& s) { + return std::move(s); +} + +namespace detail { + +inline std::variant constructSchemaOrName( + c10::FunctionSchema&& s) { + return std::move(s); +} +inline std::variant constructSchemaOrName( + c10::OperatorName&& n) { + return std::move(n); +} +inline std::variant +constructSchemaOrName(const char* str) { + auto s = torch::jit::parseSchemaOrName(str); + if (std::holds_alternative(s)) { + std::get(s).setAliasAnalysis( + c10::AliasAnalysisKind::FROM_SCHEMA); + } + return s; +} + +class TorchLibraryInit; + +} // namespace detail + +// Note [Selective build] +// ~~~~~~~~~~~~~~~~~~~~~~ +// In some settings, especially mobile, it is important to avoid compiling any +// references to functions that you aren't actually going to use, so that they +// can be eliminated by the linker. We call this capability "selective build". +// +// A very easy way to implement selective build which results in a lot of +// boilerplate is to just add ifdef's around every registration call, but this +// means you have to write a lot of extra lines of code at every registration +// site, and it also means you have to define some munging scheme to map +// operators to macros. +// +// Instead of doing this, we have a different mechanism centered around the +// concept of a SelectiveStr. A selective name is like a const char* string, +// except it also carries at compile time a boolean saying whether or not a +// registration should actually happen or not. We then have extra overloads +// which bypass registration entirely if a selective name is disabled. We do a +// constexpr test to see if a operator should be enabled or not; this is +// currently implemented in ATen/core/op_registration/op_allowlist.h + +namespace detail { + +// dummy class for non selected custom torchbind classes +class ClassNotSelected { + public: + ClassNotSelected& def_pickle(...) { + return *this; + } + ClassNotSelected& def(...) { + return *this; + } +}; + +// A SelectiveStr is like a const char*, except that it also comes +// with a type brand that says whether or not the name is enabled or +// not. If the string is disabled, then (at compile time) we DON'T generate +// a registration call for it. This class is not intended to be called +// directly; use TORCH_SELECTIVE_NAME or TORCH_SELECTIVE_SCHEMA macros below +// to create it. +template +class SelectiveStr { + public: + constexpr explicit SelectiveStr(const char* name) : name_(name) {} + constexpr operator const char*() { + return name_; + } + + private: + const char* name_; +}; + +#define TORCH_SELECTIVE_CLASS(n) \ + torch::detail::SelectiveStr(n) +#define TORCH_SELECTIVE_NAME(n) \ + torch::detail::SelectiveStr(n) +#define TORCH_SELECTIVE_SCHEMA(n) \ + torch::detail::SelectiveStr(n) + +} // namespace detail + +/// This object provides the API for defining operators and providing +/// implementations at dispatch keys. Typically, a torch::Library +/// is not allocated directly; instead it is created by the +/// TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() macros. +/// +/// Most methods on torch::Library return a reference to itself, +/// supporting method chaining. +/// +/// ``` +/// // Examples: +/// +/// TORCH_LIBRARY(torchvision, m) { +/// // m is a torch::Library +/// m.def("roi_align", ...); +/// ... +/// } +/// +/// TORCH_LIBRARY_IMPL(aten, XLA, m) { +/// // m is a torch::Library +/// m.impl("add", ...); +/// ... 
+/// } +/// ``` +/// +class TORCH_API Library final { + public: + /// \private + /// + /// Which type of macro produced this Library + enum Kind { + DEF, // from TORCH_LIBRARY (no qualifier) + IMPL, + FRAGMENT, + }; + + /// \private + /// + /// Use TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() instead of using these + /// constructors directly + Library( + Kind kind, + std::string ns, + c10::optional k, + const char* file, + uint32_t line); + + Library(const Library&) = delete; + Library& operator=(const Library&) = delete; + Library(Library&&) = default; + Library& operator=(Library&&) = default; + + // Some notes about the API design here. We had the following constraints: + // + // - We need to support multiple "types" of arguments for schema and + // functions (e.g., unnamed lambda types, regular functions, const char*, + // fully instantiated schemas) + // - We don't want to write exponentially many overloads + // - We don't want to rely on implicit conversion to a common type, + // because the C++ compiler will only be willing to do a single + // implicit conversion (reducing the set of valid types which you + // can invoke with); also error messages are worse when an implicit + // conversion is not selected (as the compiler will not explain + // why it didn't select an implicit conversion; this is different + // from overloads where it will explain each candidate overload and + // why it didn't apply) + // + // To solve all of these constraints at the same time, we use a trick taken + // from the pybind11 library: template over the argument in the user visible + // API, and inside of the templated function explicitly call an overloaded + // function to resolve the argument to a real type. You get the good error + // messages from overloads, but at the same time you only need to write the + // overload for any given argument type once. + + /// Declare an operator with a schema, but don't provide any implementations + /// for it. You're expected to then provide implementations using the + /// impl() method. All template arguments are inferred. + /// + /// \param raw_schema The schema of the operator to be defined. + /// Typically, this is a `const char*` string literal, but any type + /// accepted by torch::schema() is accepted here. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY(myops, m) { + /// m.def("add(Tensor self, Tensor other) -> Tensor"); + /// } + /// ``` + + template + Library& def( + Schema&& raw_schema, + const std::vector& tags = {}, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & { + c10::FunctionSchema s = schema(std::forward(raw_schema)); + return _def(std::move(s), nullptr, tags, rv); + } + + /// Declares that for all operators that are subsequently def'ed, their + /// abstract impls may be found in the given Python module (pymodule). + /// This registers some help text that is used if the abstract impl + /// cannot be found. + /// + /// Args: + /// - pymodule: the python module + /// - context: We may include this in the error message. + Library& impl_abstract_pystub(const char* pymodule, const char* context = "") { + impl_abstract_pystub_ = {pymodule, context}; + return *this; + } + + /// Define an operator for a schema and then register an implementation for + /// it. This is typically what you would use if you aren't planning + /// on making use of the dispatcher to structure your operator + /// implementation. 
It's roughly equivalent to calling def() and + /// then impl(), but if you omit the schema of the operator, we will + /// infer it from the type of your C++ function. All template + /// arguments are inferred. + /// + /// \param raw_name_or_schema The schema of the operator to be + /// defined, or just the name of the operator if the schema is to be + /// inferred from `raw_f`. Typically a `const char*` literal. + /// \param raw_f The C++ function that implements this operator. + /// Any valid constructor of torch::CppFunction is accepted here; + /// typically you provide a function pointer or lambda. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY(myops, m) { + /// m.def("add", add_fn); + /// } + /// ``` + template + Library& def(NameOrSchema&& raw_name_or_schema, Func&& raw_f, + const std::vector& tags = {}) & { + CppFunction f(std::forward(raw_f)); + return _def( + detail::constructSchemaOrName( + ::std::forward(raw_name_or_schema)), + ::std::move(f), tags); + } + + /// Register an implementation for an operator. You may register multiple + /// implementations for a single operator at different dispatch keys + /// (see torch::dispatch()). Implementations must have a corresponding + /// declaration (from def()), otherwise they are invalid. If you plan + /// to register multiple implementations, DO NOT provide a function + /// implementation when you def() the operator. + /// + /// \param name The name of the operator to implement. Do NOT provide + /// schema here. + /// \param raw_f The C++ function that implements this operator. Any + /// valid constructor of torch::CppFunction is accepted here; + /// typically you provide a function pointer or lambda. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY_IMPL(myops, CUDA, m) { + /// m.impl("add", add_cuda); + /// } + /// ``` + template + Library& impl( + Name name, + Func&& raw_f, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & { + // TODO: need to raise an error when you impl a function that has a + // catch all def +#if defined C10_MOBILE + CppFunction f(std::forward(raw_f), NoInferSchemaTag()); +#else + CppFunction f(std::forward(raw_f)); +#endif + return _impl(name, std::move(f), rv); + } + +#if defined C10_MOBILE + // Note: This overload is needed only for C10_MOBILE, since the automatically + // defined copy constructor for the CppFunction doesn't have the additional + // NoInferSchemaTag argument. We define the overload for the impl() function + // to accept a CppFunction&& argument. The already constructed CppFunction + // object may or may not have the inferred schema, but it doesn't matter + // for our purposes since if it already has the inferred schema, then we + // might as well just pass it through directly. + // + template + Library& impl(Name name, CppFunction&& raw_f) & { + // TODO: need to raise an error when you impl a function that has a + // catch all def + CppFunction f(std::forward(raw_f)); + return _impl(name, std::move(f)); + } +#endif + + // Helper for getting an OperatorName for a const char*. You probably + // don't need this. + c10::OperatorName _resolve(const char* name) const; + + /// \private + /// + /// Convenience overload for directly specifying the dispatch key when + /// impl(). 
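// --------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header above): the def()/impl()
// pattern documented here, with a schema-only declaration and a per-backend
// kernel. The "myops" namespace, operator, and kernel are hypothetical.
// --------------------------------------------------------------------------
#include <torch/library.h>
#include <ATen/ATen.h>

at::Tensor my_add_cpu(const at::Tensor& self, const at::Tensor& other) {
  return self + other;  // stand-in CPU kernel
}

TORCH_LIBRARY(myops, m) {
  // Declare the schema only; per-backend kernels are registered separately.
  m.def("my_add(Tensor self, Tensor other) -> Tensor");
}

TORCH_LIBRARY_IMPL(myops, CPU, m) {
  m.impl("my_add", &my_add_cpu);
}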
You probably don't need this; instead, prefer specifying + /// the dispatch key for the entire block in TORCH_LIBRARY_IMPL() + template + Library& impl(Name name, Dispatch&& key, Func&& raw_f) & { + return impl( + name, dispatch(std::forward(key), std::forward(raw_f))); + } + + template + Library& impl_UNBOXED(Name /*name*/, Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + // These overloads cover cases when a SelectiveStr (see Note [Selective + // build]) has been disabled at compile time. In that case, don't generate + // any code referencing the passed in functions at all. + Library& def(detail::SelectiveStr, const std::vector& tags = {}) & { + return *this; + } + Library& def(detail::SelectiveStr raw_schema, const std::vector& tags = {}) & { + return def(raw_schema.operator const char*(), tags); + } + template + Library& def(detail::SelectiveStr, Func&& /*raw_f*/, const std::vector& tags = {}) & { + return *this; + } + template + Library& def(detail::SelectiveStr raw_name_or_schema, Func&& raw_f, const std::vector& tags = {}) & { + return def( + raw_name_or_schema.operator const char*(), std::forward(raw_f), tags); + } + + template + Library& impl(detail::SelectiveStr, Func&& /*raw_f*/) & { + return *this; + } + template + Library& impl( + detail::SelectiveStr, + Dispatch&& /*key*/, + Func&& /*raw_f*/) & { + return *this; + } + template + Library& impl_UNBOXED( + detail::SelectiveStr /*name*/, + Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + template + Library& impl(detail::SelectiveStr name, Func&& raw_f) & { + return impl(name.operator const char*(), std::forward(raw_f)); + } + template + Library& impl( + detail::SelectiveStr name, + Dispatch&& key, + Func&& raw_f) & { + return impl( + name.operator const char*(), + std::forward(key), + std::forward(raw_f)); + } + template + Library& impl_UNBOXED( + detail::SelectiveStr /*name*/, + Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + /// Register a fallback implementation for all operators which will be used + /// if there is not a specific implementation for an operator available. + /// There MUST be a DispatchKey associated with a fallback; e.g., + /// only call this from TORCH_LIBRARY_IMPL() with namespace `_`. + /// + /// \param raw_f The function that implements the fallback. Unboxed + /// functions typically do not work as fallback functions, as + /// fallback functions must work for every operator (even though + /// they have varying type signatures). 
Typical arguments are + /// CppFunction::makeFallthrough() or + /// CppFunction::makeFromBoxedFunction() + /// + /// ``` + /// // Example: + /// + /// TORCH_LIBRARY_IMPL(_, AutogradXLA, m) { + /// // If there is not a kernel explicitly registered + /// // for AutogradXLA, fallthrough to the next + /// // available kernel + /// m.fallback(torch::CppFunction::makeFallthrough()); + /// } + /// + /// // See aten/src/ATen/core/dispatch/backend_fallback_test.cpp + /// // for a full example of boxed fallback + /// ``` + template + Library& fallback(Func&& raw_f) & { + CppFunction f((std::forward(raw_f))); + return _fallback(std::move(f)); + } + + template + inline torch::class_ class_(const std::string& className); + + // These overloads enable the use of selective build on classes registered + // within a library. The API is the same as before with 1 minor change. + // Instead of m.class_("foo") you instead do + // m.class_(TORCH_SELECTIVE_CLASS("foo")) + template + inline torch::class_ class_(detail::SelectiveStr className); + + template + inline detail::ClassNotSelected class_(detail::SelectiveStr className); + + // De-registers all registrations created with this Library + void reset(); + + private: + Kind kind_; + c10::optional ns_; + c10::optional dispatch_key_; + c10::optional> impl_abstract_pystub_; + const char* file_; + uint32_t line_; + + std::vector registrars_; + + friend class detail::TorchLibraryInit; + + // Non-user visible actual implementations of functions. These aren't + // public because we only implement & qualifier and not && qualifier + Library& _def( + c10::FunctionSchema&& schema, + c10::OperatorName* out_name = nullptr, + const std::vector& tags = {}, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &; + Library& _def( + std::variant&&, + CppFunction&& f, + const std::vector& tags = {}) &; + Library& _impl( + const char* name, + CppFunction&& f, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &; + Library& _fallback(CppFunction&& f) &; + + at::OperatorName _parseNameForLib(const char* name_str) const; +}; + +namespace detail { + +class TorchLibraryInit final { + private: + using InitFn = void(Library&); + Library lib_; + + public: + TorchLibraryInit( + Library::Kind kind, + InitFn* fn, + const char* ns, + c10::optional k, + const char* file, + uint32_t line) + : lib_(kind, ns, k, file, line) { + fn(lib_); + } +}; + +} // namespace detail + +} // namespace torch + +// NB: The EXACT NAMING of the initializer functions (e.g., +// TORCH_LIBRARY_init_aten) matters for the code analyzer; +// see the regexes at tools/code_analyzer/run_analyzer.sh + +/// Macro for defining a function that will be run at static +/// initialization time to define a library of operators in the +/// namespace `ns` (must be a valid C++ identifier, no quotes). +/// Use this macro when you want to define a new set of custom operators +/// that do not already exist in PyTorch. +/// +/// Example usage: +/// +/// ``` +/// TORCH_LIBRARY(myops, m) { +/// // m is a torch::Library; methods on it will define +/// // operators in the myops namespace +/// m.def("add", add_impl); +/// } +/// ``` +/// +/// The `m` argument is bound to a torch::Library that is used to +/// register operators. There may only be one TORCH_LIBRARY() +/// for any given namespace. 
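// --------------------------------------------------------------------------
// Illustrative sketch (not part of the vendored header above): a boxed
// backend fallback registered in the reserved `_` namespace, as described
// above. The PrivateUse1 key and the error text are arbitrary choices.
// --------------------------------------------------------------------------
#include <torch/library.h>

// Boxed calling convention: the kernel receives the operator handle and a
// stack of boxed IValue arguments, whatever the operator's signature is.
void unsupported_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  (void)stack;
  TORCH_CHECK(false,
              "Operator ", op.schema().name(),
              " has no kernel registered for the PrivateUse1 backend.");
}

TORCH_LIBRARY_IMPL(_, PrivateUse1, m) {
  m.fallback(torch::CppFunction::makeFromBoxedFunction<&unsupported_fallback>());
}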
+#define TORCH_LIBRARY(ns, m) \ + static void TORCH_LIBRARY_init_##ns(torch::Library&); \ + static const torch::detail::TorchLibraryInit TORCH_LIBRARY_static_init_##ns( \ + torch::Library::DEF, \ + &TORCH_LIBRARY_init_##ns, \ + #ns, \ + c10::nullopt, \ + __FILE__, \ + __LINE__); \ + void TORCH_LIBRARY_init_##ns(torch::Library& m) + +/// \private +/// +/// This macro is a version of TORCH_LIBRARY() that doesn't enforce that there +/// is only one library (it is a "fragment"). This is used inside the +/// PerOpRegistration.cpp file, as well as in places where all op registrations +/// within the same namespace cannot be easily put into one macro block +/// (this is mostly the case for custom ops in fbcode that were ported from +/// the old API) +#define TORCH_LIBRARY_FRAGMENT(ns, m) _TORCH_LIBRARY_FRAGMENT(ns, m, C10_UID) + +/// \private +/// +/// The above macro requires an extra unique identifier (uid) to prevent +/// variable name collisions This can happen if TORCH_LIBRARY_FRAGMENT is called +/// multiple times with the same namespace in the same translation unit. Note +/// that the TORCH_LIBRARY variant doesn't run into this problem, because it +/// enforces that it can only be called once for a given namespace. +#define _TORCH_LIBRARY_FRAGMENT(ns, m, uid) \ + static void C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library&); \ + static const torch::detail::TorchLibraryInit C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_static_init_##ns##_, uid)( \ + torch::Library::FRAGMENT, \ + &C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), \ + #ns, \ + c10::nullopt, \ + __FILE__, \ + __LINE__); \ + void C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library & m) + +/// Macro for defining a function that will be run at static +/// initialization time to define operator overrides for dispatch key +/// `k` (must be an unqualified enum member of c10::DispatchKey) in +/// namespace `ns` (must be a valid C++ identifer, no quotes). Use this +/// macro when you want to implement a preexisting set of custom +/// operators on a new dispatch key (e.g., you want to provide CUDA +/// implementations of already existing operators). One common usage +/// pattern is to use TORCH_LIBRARY() to define schema for all new +/// operators you want to define, and then use several +/// TORCH_LIBRARY_IMPL() blocks to provide implementations of the +/// operator for CPU, CUDA and Autograd. +/// +/// In some cases, you need to define something that applies to all namespaces, +/// not just one namespace (usually a fallback). In that case, use the reserved +/// namespace _, e.g., +/// +/// ``` +/// TORCH_LIBRARY_IMPL(_, XLA, m) { +/// m.fallback(xla_fallback); +/// } +/// ``` +/// +/// Example usage: +/// +/// ``` +/// TORCH_LIBRARY_IMPL(myops, CPU, m) { +/// // m is a torch::Library; methods on it will define +/// // CPU implementations of operators in the myops namespace. +/// // It is NOT valid to call torch::Library::def() +/// // in this context. +/// m.impl("add", add_cpu_impl); +/// } +/// ``` +/// +/// If ``add_cpu_impl`` is an overloaded function, use a +/// ``static_cast`` to specify which overload you want +/// (by providing the full type). +/// +// NB: if the dispatch key is not whitelisted, we simply omit the Library +// call entirely +#define TORCH_LIBRARY_IMPL(ns, k, m) _TORCH_LIBRARY_IMPL(ns, k, m, C10_UID) + +/// \private +/// +/// The above macro requires an extra unique identifier (uid) to prevent +/// variable name collisions. 
This can happen if TORCH_LIBRARY_IMPL is called +/// multiple times with the same namespace and dispatch key in the same +/// translation unit. +#define _TORCH_LIBRARY_IMPL(ns, k, m, uid) \ + static void C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library&); \ + static const torch::detail::TorchLibraryInit C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( \ + torch::Library::IMPL, \ + (c10::impl::dispatch_key_allowlist_check(c10::DispatchKey::k) \ + ? &C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid) \ + : [](torch::Library&) -> void {}), \ + #ns, \ + c10::make_optional(c10::DispatchKey::k), \ + __FILE__, \ + __LINE__); \ + void C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library & m) + +// These are variants of the macros above which are to be used for testing (they +// don't setup the static initializer, so you can control the visibility of +// the allocated library yourself). +// +// DO NOT use these in production code, they are NOT understood by the +// code analyzer and will be incorrectly analyzed in those situations. + +/// \private +#define MAKE_TORCH_LIBRARY(ns) \ + torch::Library(torch::Library::DEF, #ns, c10::nullopt, __FILE__, __LINE__) +/// \private +#define MAKE_TORCH_LIBRARY_IMPL(ns, k) \ + torch::Library( \ + torch::Library::IMPL, \ + #ns, \ + c10::make_optional(c10::DispatchKey::k), \ + __FILE__, \ + __LINE__) + +// Make the custom class API visible, so it is available from +// torch::Library. + +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/script.h b/venv/lib/python3.10/site-packages/torch/include/torch/script.h new file mode 100644 index 0000000000000000000000000000000000000000..58510670613b58ec9b39f3d69d652be6cc0ce998 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/script.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/xnnpack.h b/venv/lib/python3.10/site-packages/torch/include/xnnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..e71be0fd57ffc1ef2cc67b2fc8fb20fc4288a1d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/xnnpack.h @@ -0,0 +1,6172 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// Copyright 2019 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/// The number of bytes XNNPACK may read beyond array bounds. +/// The caller must allocate at least this many extra bytes after the tensor data passed to XNNPACK. +/// +/// Note: XNNPACK reads, but never writes beyond array bounds. +#define XNN_EXTRA_BYTES 16 + +/// Maximum number of dimensions in tensor shape. +#define XNN_MAX_TENSOR_DIMS 6 + +/// Allow sparse inference in a Runtime. +/// +/// Note: this flag hints XNNPACK to consider sparse inference, but does not guarantee it. +#define XNN_FLAG_HINT_SPARSE_INFERENCE 0x00000001 + +/// Allow IEEE FP16 inference in a Runtime. +/// +/// Note: this flag hints XNNPACK to consider IEEE FP16 inference, but does not guarantee it. +#define XNN_FLAG_HINT_FP16_INFERENCE 0x00000002 + +/// Force IEEE FP16 inference in a Runtime, and fail if FP16 inference is not possible. 
+/// +/// Note: this flag guarantees that XNNPACK will use IEEE FP16 inference, or fail to create the Runtime object. +/// Warning: on x86 systems FP16 computations will be emulated at a substantial performance cost. +#define XNN_FLAG_FORCE_FP16_INFERENCE 0x00000004 + +/// Enable timing of each operator's runtime. +#define XNN_FLAG_BASIC_PROFILING 0x00000008 + +/// Enable the just-in-time compiler. +#define XNN_FLAG_JIT 0x00000010 + +/// The convolution operator represents a depthwise convolution, and use HWGo layout for filters. +#define XNN_FLAG_DEPTHWISE_CONVOLUTION 0x00000001 + +/// Assume transposed weights in a fully connected operator. +#define XNN_FLAG_TRANSPOSE_WEIGHTS 0x00000001 + +/// The operator assumes NHWC layout for the input, regardless of the output layout. +#define XNN_FLAG_INPUT_NHWC 0x00000002 + +/// Match "SAME" padding in TensorFlow. Exact padding values are computed dynamically depending on input size. +#define XNN_FLAG_TENSORFLOW_SAME_PADDING 0x00000004 + +/// Assume transposed weights in a batch matrix multiply operator. +#define XNN_FLAG_TRANSPOSE_B XNN_FLAG_TRANSPOSE_WEIGHTS + +/// Assume transposed input in a batch matrix multiply operator. +#define XNN_FLAG_TRANSPOSE_A 0x00000002 + +/// Implicitly flatten and reshape input of a Fully Connected operator into a 2D tensor. +#define XNN_FLAG_TENSORFLOW_RESHAPE_2D 0x00000004 + +/// Match behaviour of TensorFlow 1.x. +#define XNN_FLAG_TENSORFLOW_LEGACY_MODE 0x00000004 + +/// Static weights of the FP16 operator are in FP32 format. +#define XNN_FLAG_FP32_STATIC_WEIGHTS 0x00000008 + +/// Align corners of input and output images in resize operations. +#define XNN_FLAG_ALIGN_CORNERS 0x00000008 + +/// Yield worker threads of the thread pool to the system scheduler after the inference. +#define XNN_FLAG_YIELD_WORKERS 0x00000010 + +/// Use transient indirection buffer to reduce memory footprint +#define XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER 0x00000020 + +/// Reduce the dimensions. +#define XNN_FLAG_REDUCE_DIMS 0x00000040 + +/// The number of entries in an array of xnn_dynamic_quantization_params that XNNPACK may read beyond array bounds. +/// The caller must allocate at least this many extra xnn_dynamic_quantization_params before passing the array to XNNPACK. +/// +/// Note: XNNPACK reads, but never writes beyond array bounds. +#define XNN_EXTRA_QUANTIZATION_PARAMS 8 + +struct xnn_dynamic_quantization_params { + int32_t zero_point; + float scale; +}; + +/// Status code for any XNNPACK function call. +enum xnn_status { + /// The call succeeded, and all output arguments now contain valid data. + xnn_status_success = 0, + xnn_status_uninitialized = 1, + xnn_status_invalid_parameter = 2, + xnn_status_invalid_state = 3, + xnn_status_unsupported_parameter = 4, + xnn_status_unsupported_hardware = 5, + xnn_status_out_of_memory = 6, + xnn_status_reallocation_required = 7, +}; + +struct xnn_allocator { + /// User-specified pointer that will be passed as-is to all functions in this structure. + void* context; + /// Pointer to a function to be called for general memory allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param size - The size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the allocated memory block of at least @ref size bytes. + /// If allocation fails, the function must return NULL. + void* (*allocate)(void* context, size_t size); + /// Pointer to a function to be called for general memory re-allocation, i.e. 
to increase or shrink a previously + /// allocated memory block. The content of the old memory block is copied to the new memory block. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL. + /// If the pointer is NULL, the @ref reallocate call is equivalent to an @ref allocate call. + /// @param size - The new size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the newly allocated memory block of at least @ref size bytes with the content of the previous + /// memory block. + /// If allocation fails, the function must return NULL, but must not release the previous memory block. + void* (*reallocate)(void* context, void* pointer, size_t size); + /// Pointer to a function to be called for general memory de-allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL. + /// If the pointer is NULL, the @ref deallocate call is a no-op. + void (*deallocate)(void* context, void* pointer); + /// Pointer to a function to be called for aligned memory allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param alignment - The alignment of the memory block to allocate, in bytes. Alignment is always a power-of-2. + /// @param size - The size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the allocated memory block of at least @ref size bytes. + /// If allocation fails, the function must return NULL. + void* (*aligned_allocate)(void* context, size_t alignment, size_t size); + /// Pointer to a function to be called for aligned memory de-allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref aligned_allocate function. Can be NULL. + /// If the pointer is NULL, the @ref aligned_deallocate call is a no-op. + void (*aligned_deallocate)(void* context, void* pointer); +}; + +/// Initialize XNNPACK library. +/// +/// XNNPACK must be successfully initialized before use. During initialization, XNNPACK populates internal structures +/// depending on the host processor. Initialization can be time-consuming. +/// +/// @param[in] allocator - structure with function pointers to be use for memory allocation and de-allocation. +/// If this argument is NULL, system-provided memory management functions (e.g. malloc/free) +/// will be used. +/// +/// @retval xnn_status_success - XNNPACK is successfully initialized and ready to use. +/// @retval xnn_status_out_of_memory - initialization failed due to out-of-memory condition. +/// @retval xnn_status_unsupported_hardware - initialization failed because the host processor does not satisfy the +/// minimum hardware requirements for XNNPACK. E.g. this may happen on x86 +/// processors without SSE2 extension, or on 32-bit ARM processors without +/// the NEON SIMD extension. +enum xnn_status xnn_initialize(const struct xnn_allocator* allocator); + +/// Deinitialize XNNPACK library. +/// +/// To avoid memory and resource leaks, users must call xnn_deinitialize once for each successful xnn_initialize call. +/// +/// @retval xnn_status_success - deinitialization call succeeded. 
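/* Editor's illustrative sketch (not part of the upstream header): a minimal
   initialization call using the xnn_initialize declaration above. Passing NULL
   selects the system-provided memory management functions; a successful call
   should later be paired with xnn_deinitialize(), declared immediately below.
   The example_ function name is a placeholder for illustration only. */
static enum xnn_status example_init_xnnpack(void) {
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) {
    /* e.g. xnn_status_unsupported_hardware or xnn_status_out_of_memory */
    return status;
  }
  /* ... create Subgraphs and Runtimes here, then call xnn_deinitialize() ... */
  return xnn_status_success;
}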
+enum xnn_status xnn_deinitialize(void); + +/// Subgraph is an abstract representation of a neural network model. +/// Subgraph objects are used to define Values (tensors) and Nodes (operators) comprising the model. +typedef struct xnn_subgraph* xnn_subgraph_t; + +/// Create a empty Subgraph object. +/// +/// @param external_value_ids - number of Value IDs to reserve for communication with external graph representation. +/// The Subgraph object would avoid creating internal Value IDs in the +/// [0, reserved_value_ids-1] range. +/// @param flags - binary features of the subgraph. No supported flags are currently defined. +/// @param subgraph_out - pointer to the variable that will be initialized with a handle to the Subgraph object upon +/// successful return. +enum xnn_status xnn_create_subgraph( + uint32_t external_value_ids, + uint32_t flags, + xnn_subgraph_t* subgraph_out); + +/// Destroy a Subgraph object, as well as Values, and Nodes associated with the subgraph. +/// +/// @param subgraph - the Subgraph object to destroy. +enum xnn_status xnn_delete_subgraph( + xnn_subgraph_t subgraph); + +#define XNN_VALUE_FLAG_EXTERNAL_INPUT 0x00000001 +#define XNN_VALUE_FLAG_EXTERNAL_OUTPUT 0x00000002 +#define XNN_VALUE_FLAG_PERSISTENT 0x00000004 + +#define XNN_INVALID_VALUE_ID UINT32_MAX + +/// Type of elements in a Value object. +enum xnn_datatype { + /// Invalid data type. Valid Values never have this datatype. + xnn_datatype_invalid = 0, + /// IEEE754 single-precision floating-point. + xnn_datatype_fp32 = 1, + /// IEEE754 half-precision floating-point. + xnn_datatype_fp16 = 2, + /// Quantized 8-bit signed integer with shared per-Value quantization parameters. + xnn_datatype_qint8 = 3, + /// Quantized 8-bit unsigned integer with shared per-Value quantization parameters. + xnn_datatype_quint8 = 4, + /// Quantized 32-bit signed integer with shared per-Value quantization parameters. + xnn_datatype_qint32 = 5, + /// Quantized 8-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint8 = 6, + /// Quantized 32-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint32 = 7, + /// Quantized 4-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint4 = 8, + /// Dynamically quantized 8-bit signed integer with per-batch quantization parameters. + xnn_datatype_qdint8 = 9, +}; + +/// Define a tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be is NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. 
Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + size_t num_dims, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - multiplication factor to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be is NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + int32_t zero_point, + float scale, + size_t num_dims, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +enum xnn_status xnn_define_channelwise_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + const float* scale, + size_t num_dims, + size_t channel_dim, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Validate the dimensions, channel_dim, zero point, datatype, and scale of a quantized tensor-type. +/// +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - multiplication factor to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. 
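/* Editor's illustrative sketch (not part of the upstream header): creating a
   Subgraph and defining a dense FP32 external-input Value with the
   xnn_create_subgraph and xnn_define_tensor_value declarations above. The
   2-value reservation, NHWC shape, and external ID 0 are arbitrary choices
   for illustration; error checking is kept minimal. */
static enum xnn_status example_define_fp32_input(xnn_subgraph_t* subgraph_out,
                                                 uint32_t* input_id_out) {
  enum xnn_status status =
      xnn_create_subgraph(/*external_value_ids=*/2, /*flags=*/0, subgraph_out);
  if (status != xnn_status_success) {
    return status;
  }
  const size_t dims[4] = {1, 224, 224, 3};  /* N, H, W, C */
  *input_id_out = XNN_INVALID_VALUE_ID;
  return xnn_define_tensor_value(
      *subgraph_out, xnn_datatype_fp32, /*num_dims=*/4, dims,
      /*data=*/NULL,  /* not statically initialized */
      /*external_id=*/0, XNN_VALUE_FLAG_EXTERNAL_INPUT, input_id_out);
}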
+enum xnn_status xnn_validate_quantized_tensor( + enum xnn_datatype datatype, + int32_t zero_point, + float scale, + size_t num_dims, + const size_t* dims); + +/// Validate the dimensions, channel_dim, zero point, datatype, and scales of a channelwise quantized tensor-type. +/// +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - per-channel multiplication factors to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters. +/// Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution, +/// Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in +/// the Depthwise Convolution operators. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +enum xnn_status xnn_validate_channelwise_quantized_tensor( + enum xnn_datatype datatype, + int32_t zero_point, + const float* scale, + size_t num_dims, + size_t channel_dim, + const size_t* dims); + +/// Define a channelwise quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - per-channel multiplication factors to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters. +/// Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution, +/// Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in +/// the Depthwise Convolution operators. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be is NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. 
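/* Editor's illustrative sketch (not part of the upstream header): defining a
   statically initialized, per-tensor quantized QINT8 Value with
   xnn_define_quantized_tensor_value, declared above. The weight buffer, shape,
   zero point, and scale are placeholders; per the documentation above, the
   static data must outlive the Subgraph and any Runtime built from it. */
static enum xnn_status example_define_qint8_weights(
    xnn_subgraph_t subgraph,
    const int8_t* weights,  /* assumed static buffer of 64 * 32 elements */
    uint32_t* weights_id_out) {
  const size_t dims[2] = {64, 32};
  *weights_id_out = XNN_INVALID_VALUE_ID;
  return xnn_define_quantized_tensor_value(
      subgraph, xnn_datatype_qint8, /*zero_point=*/0, /*scale=*/0.02f,
      /*num_dims=*/2, dims, weights,
      /*external_id=*/XNN_INVALID_VALUE_ID, /*flags=*/0, weights_id_out);
}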
+enum xnn_status xnn_define_channelwise_quantized_tensor_value_v2( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + int32_t zero_point, + const float* scale, + size_t num_dims, + size_t channel_dim, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a dynamically quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param num_dims - number of dimensions in the shape. +/// @param num_non_batch_dims - number of non-batch dimensions in the shape. The leading (num_dims - num_non_batch_dims) +/// dimensions will be flattened and treated as batch size. A set of quantization parameters +/// will be calculated for each batch element. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. No supported flags are currently defined. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_dynamically_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + size_t num_dims, + size_t num_nonbatch_dims, + const size_t* dims, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a Convert Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Convert Node. No supported flags are currently defined. +enum xnn_status xnn_define_convert( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Convolution Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride). 
+/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param groups - number of convolution groups. +/// @param group_input_channels - number of input channels per group. +/// @param group_output_channels - number of output channels per group. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, groups * group_input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. The filter tensor must ge a 4D tensor defined in the @a subgraph +/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels] +/// dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. If +/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with [groups * +/// group_output_channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, groups * group_output_channels] dimensions. +/// @param flags - binary features of the 2D Convolution Node. The only currently supported values is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_convolution_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Deconvolution (Transposed Convolution) Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param padding_top - implicit padding above 2D output data. +/// @param padding_right - implicit padding to the right of 2D output data. +/// @param padding_bottom - implicit padding below 2D output data. +/// @param padding_left - implicit padding to the left of 2D output data. +/// @param adjustment_height - additional elements in the bottom of the 2D output data. +/// @param adjustment_width - additional elements to the right of the 2D output data. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param upsampling_height - height of upsampling region for deconvolution input (deconvolution height stride). +/// @param upsampling_width - width of upsampling region for deconvolution input (deconvolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param groups - number of convolution groups. +/// @param group_input_channels - number of input channels per group. +/// @param group_output_channels - number of output channels per group. 
+/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, groups * group_input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. The filter tensor must ge a 4D tensor defined in the @a subgraph +/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels] +/// dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. If +/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with +/// [groups * group_output_channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, groups * group_output_channels] dimensions. +/// @param flags - binary features of the 2D Deconvolution Node. No supported flags are currently defined. +enum xnn_status xnn_define_deconvolution_2d( + xnn_subgraph_t subgraph, + uint32_t padding_top, + uint32_t padding_right, + uint32_t padding_bottom, + uint32_t padding_left, + uint32_t adjustment_height, + uint32_t adjustment_width, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t upsampling_height, + uint32_t upsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Depthwise Convolution Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride). +/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param depth_multiplier - ratio of output channels to input channels. +/// @param input_channels - number of input channels. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. 
The filter tensor must be a 4D tensor defined in the @a subgraph +/// with [1, kernel_height, kernel_width, input_channels * depth_multiplier] dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Depthwise Convolution Node without +/// a bias. If present, the bias tensor must be a 1D tensor defined in the @a subgraph with +/// [input_channels * depth_multiplier] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, input_channels * depth_multiplier] dimensions. +/// @param flags - binary features of the 2D Depthwise Convolution Node. The only currently supported value is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_depthwise_convolution_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t depth_multiplier, + size_t input_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Depth To Space 2D Node and add it to a Subgraph. +/// +/// The Depth To Space 2D Node rearranges data from depth into blocks of spatial data (a reverse transform to +/// Space To Depth). For a given input pixel, an output square of pixels with side @a block_size is formed from values +/// in the corresponding number of its channels. The output depth is therefore @a block_size x @a block_size times +/// smaller than that of the input. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param block_size - the size of the spatial block. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, OC * block_size * block_size] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH * block_size, IW * block_size, OC] dimensions. +/// @param flags - binary features of the Depth To Space 2D Node. No supported flags are currently defined. +enum xnn_status xnn_define_depth_to_space_2d( + xnn_subgraph_t subgraph, + uint32_t block_size, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +enum xnn_status xnn_define_depth_to_space( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t block_size, + uint32_t flags); + +/// Define a 1D Global Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions +/// defined in the @a subgraph. Averaging is performed across the second-innermost dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 1D Global Average Pooling Node. The only currently supported value is +/// XNN_FLAG_REDUCE_DIMS.
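/* Editor's illustrative sketch (not part of the upstream header): a 3x3,
   stride-2 convolution Node with TensorFlow "SAME" padding and no output
   clipping, using xnn_define_convolution_2d as declared above. The Value IDs
   are assumed to have been defined on the same Subgraph; -INFINITY/INFINITY
   assume <math.h>. */
static enum xnn_status example_define_conv3x3_s2(xnn_subgraph_t subgraph,
                                                 uint32_t input_id, uint32_t filter_id,
                                                 uint32_t bias_id, uint32_t output_id) {
  return xnn_define_convolution_2d(
      subgraph,
      /*input_padding_top=*/0, /*input_padding_right=*/0,
      /*input_padding_bottom=*/0, /*input_padding_left=*/0,
      /*kernel_height=*/3, /*kernel_width=*/3,
      /*subsampling_height=*/2, /*subsampling_width=*/2,
      /*dilation_height=*/1, /*dilation_width=*/1,
      /*groups=*/1, /*group_input_channels=*/3, /*group_output_channels=*/16,
      /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
      input_id, filter_id, bias_id, output_id,
      XNN_FLAG_TENSORFLOW_SAME_PADDING);
}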
+enum xnn_status xnn_define_global_average_pooling_1d( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Global Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions +/// defined in the @a subgraph. Averaging is performed across the second- and third-innermost +/// dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 2D Global Average Pooling Node. The only currently supported value is +/// XNN_FLAG_REDUCE_DIMS. +enum xnn_status xnn_define_global_average_pooling_2d( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 1D Global Sum Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions +/// defined in the @a subgraph. Summation is performed across the second-innermost dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 1D Global Sum Pooling Node. The only currently supported value is +/// XNN_FLAG_REDUCE_DIMS. +enum xnn_status xnn_define_global_sum_pooling_1d( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Global Sum Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions +/// defined in the @a subgraph. Summation is performed across the second- and third-innermost +/// dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 2D Global Sum Pooling Node. The only currently supported value is +/// XNN_FLAG_REDUCE_DIMS. +enum xnn_status xnn_define_global_sum_pooling_2d( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data.
Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param pooling_height - pooling (kernel) height. +/// @param pooling_width - pooling (kernel) width. +/// @param stride_height - displacing of the pooling window in the vertical dimension of the input pixels corresponding +/// to vertically adjacent output pixels. +/// @param stride_width - displacing of the pooling window in the horizontal dimension of the input pixels corresponding +/// to horizontally adjacent output pixels. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, channels] dimensions +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, channels] dimensions. +/// @param flags - binary features of the 2D Average Pooling Node. The only currently supported values is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_average_pooling_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Fully Connected Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the +/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least +/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if +/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be +/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of +/// [num_input_elements] dimensions, then reshaped into a 2D tensor of +/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the +/// total number of elements in the input tensor. +/// @param filter_id - Value ID for the filter tensor. The filter tensor must a 2D tensor defined in the @a subgraph. +/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have +/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is +/// specified, the filter tensor must have [input_channels, output_channels] dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias. +/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels] +/// dimensions. 
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same +/// dimensionality as the input tensor, all its dimensions but the last one must match the +/// corresponding dimensions of the input tensor, and the last dimensions of the output tensor must +/// match the first dimension of the filter tensor. In particular, if input is a 2D tensor, output +/// must be a 2D tensor of [batch_size, output_channels] dimensions. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of +/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the +/// total number of elements in the input tensor. +/// @param flags - binary features of the Fully Connected Node. The only currently supported values are +/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS. +enum xnn_status xnn_define_fully_connected( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Sparse Fully Connected Node and add it to a Subgraph. +/// +/// This operator is experimental, and will be removed in the future. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the +/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least +/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if +/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be +/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of +/// [num_input_elements] dimensions, then reshaped into a 2D tensor of +/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the +/// total number of elements in the input tensor. +/// @param filter_id - Value ID for the filter tensor. The filter tensor must a 2D tensor defined in the @a subgraph. +/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have +/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is +/// specified, the filter tensor must have [input_channels, output_channels] dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias. +/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels] +/// dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same +/// dimensionality as the input tensor, all its dimensions but the last one must match the +/// corresponding dimensions of the input tensor, and the last dimensions of the output tensor must +/// match the first dimension of the filter tensor. 
In particular, if input is a 2D tensor, output +/// must be a 2D tensor of [batch_size, output_channels] dimensions. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of +/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the +/// total number of elements in the input tensor. +/// @param flags - binary features of the Fully Connected Node. The only currently supported values are +/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS. +enum xnn_status xnn_define_fully_connected_sparse( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Max Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param pooling_height - pooling (kernel) height. +/// @param pooling_width - pooling (kernel) width. +/// @param stride_height - displacing of the pooling window in the vertical dimension of the input pixels corresponding +/// to vertically adjacent output pixels. +/// @param stride_width - displacing of the pooling window in the horizontal dimension of the input pixels corresponding +/// to horizontally adjacent output pixels. +/// @param dilation_height - dilation of pooling elements along the height dimension. +/// @param dilation_width - dilation of pooling elements along the width dimension. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, channels] dimensions +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, channels] dimensions. +/// @param flags - binary features of the 2D Max Pooling Node. The only currently supported values is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_max_pooling_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D ArgMax Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. 
+/// @param input_padding_left - implicit zero-padding to the left of 2D input data. +/// @param pooling_height - pooling (kernel) height. Vertical stride between pooling regions match this value. +/// @param pooling_width - pooling (kernel) width. Horizontal stride between pooling regions match this value. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, channels] dimensions +/// @param output_value_id - Value ID for the output tensor with the maximum values in the pools. The output tensor must +/// be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels] dimensions. +/// @param output_index_id - Value ID for the output tensor with the indexes of the maximum values in the pools. The +/// output tensor must be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels] +/// dimensions. +/// @param flags - binary features of the 2D ArgMax Pooling Node. No supported flags are currently defined. +enum xnn_status xnn_define_argmax_pooling_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t input_id, + uint32_t output_value_id, + uint32_t output_index_id, + uint32_t flags); + +/// Define a 2D UnPooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param padding_top - implicit padding above 2D output data. +/// @param padding_right - implicit padding to the right of 2D output data. +/// @param padding_bottom - implicit padding below 2D output data. +/// @param padding_left - implicit padding to the left of 2D output data. +/// @param pooling_height - height of the pooling window. +/// @param pooling_width - width of the pooling window. +/// @param input_value_id - Value ID for the input tensor with the max-pooling values to invert. The input value tensor +/// must be a 4D tensor defined in the @a subgraph with [N, IH, IW, channels] dimensions. +/// @param input_index_id - Value ID for the input tensor with the indices of the per-pool maximum values produced by +/// a 2D UnPooling Node. The input tensor must be a 4D tensor defined in the @a subgraph with +/// [N, IH, IW, channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, channels] dimensions. +/// @param flags - binary features of the 2D UnPooling Node. No supported flags are currently defined. +enum xnn_status xnn_define_unpooling_2d( + xnn_subgraph_t subgraph, + uint32_t padding_top, + uint32_t padding_right, + uint32_t padding_bottom, + uint32_t padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t input_value_id, + uint32_t input_index_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Add Node and add it to a Subgraph. +/// +/// The 2-Input Add Node computes elementwise addition of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. 
The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Add Node. No supported flags are currently defined. +enum xnn_status xnn_define_add2( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Multiply Node and add it to a Subgraph. +/// +/// The 2-Input Multiply Node computes elementwise multiplication of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Multiply Node. No supported flags are currently defined. +enum xnn_status xnn_define_multiply2( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +// Cap operations applied to logits (Q * K) of attention operator. +enum xnn_attention_logits_cap_type { + // No capping. + xnn_attention_logits_cap_type_none = 0, + // Cap the absolute values of logits by tanh: tanh(logits / cap) * cap + xnn_attention_logits_cap_type_tanh +}; + +// Params when the cap type is xnn_attention_logits_cap_type_tanh. +struct xnn_attention_logits_cap_tanh_params { + float cap; +}; + +/// Define a Scaled Dot-Product Attention Node and add it to a Subgraph. +/// +/// This operator is experimental. +/// +/// The Scaled Dot-Product Attention Node computes a multi-head or multi-query scaled dot attention on the query, key, +/// and value tensors. +/// +/// @param subgraph - a Subgraph object that will own the created Node. 
+/// @param cap_type - type of cap to be applied to the logits. +/// @param cap_params - parameters for the cap. Must be a pointer to xnn_attention_logits_cap_tanh_params if cap_type +/// is xnn_attention_logits_cap_type_tanh. +/// @param query_id - Value ID for the query tensor. The query tensor must be a 3+-dimensional tensor defined in the +/// @a subgraph with the dimensions as [*, H, T, C], where H/T/C are the heads/tokens/channels, and * +/// is the 0 or more dimensions treated as batch size. +/// @param key_id - Value ID for the key tensor. The key tensor must be a 2+-dimensional tensor defined in the +/// @a subgraph. It can have the same number of dimensions as the query, with the dimensions as +/// [*, H, U, C] (multi-head), or have 1 less dimension than the query, with the dimensions +/// as [*, U, C] (multi-query, number of heads omitted implies single head), where H/U/C are the +/// heads/key_value_tokens/channels, and * is the 0 or more dimensions treated as batch size. These +/// batch size dimensions must be the same as query. +/// @param value_id - Value ID for the value tensor. The value tensor must be a 2+-dimensional tensor defined in the +/// @a subgraph. It can have the same number of dimensions as the query, with the dimensions as +/// [*, H, U, D] (multi-head), or have 1 less dimension than the query, with the dimensions +/// as [*, U, D] (multi-query, number of heads omitted implies single head), where H/U/D are the +/// heads/key_value_tokens/value_channels, and * is the 0 or more dimensions treated as batch size. +/// These batch size dimensions must be the same as query and key. +/// @param scale_id - Value ID for the scale tensor. The scale tensor must be a 1D tensor defined in the @a subgraph +/// with [C] dimensions. The query tensor is multiplied with this scale tensor before the dot product +/// with the key tensor. +/// @param mask_id - Value ID for the mask tensor. The mask tensor must be a 2D tensor defined in the @a subgraph with +/// [T, U] dimensions. The mask tensor is added to the logits (query dot key). +/// @param output_id - Value ID for the output tensor. The output tensor must be a 3+-dimensional tensor defined in the +/// @a subgraph with the dimensions as [*, H, T, D], where H/T/D are the heads/tokens/value_channels, +/// and * is the 0 or more dimensions treated as batch size. These batch size dimensions must be the +/// same as query, key, and value. +/// @param flags - binary features of the Scaled Dot Product Attention Node. No supported flags are currently defined. +enum xnn_status xnn_define_scaled_dot_product_attention( + xnn_subgraph_t subgraph, + enum xnn_attention_logits_cap_type cap_type, + const void* cap_params, + uint32_t query_id, + uint32_t key_id, + uint32_t value_id, + uint32_t scale_id, + uint32_t mask_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Subtract Node and add it to a Subgraph. +/// +/// The Subtract Node computes elementwise subtraction of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1.
In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Subtract Node. No supported flags are currently defined. +enum xnn_status xnn_define_subtract( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Divide Node and add it to a Subgraph. +/// +/// The Divide Node computes elementwise division of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Divide Node. No supported flags are currently defined. +enum xnn_status xnn_define_divide( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Maximum Node and add it to a Subgraph. +/// +/// The 2-Input Maximum Node computes elementwise maximum of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. 
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Maximum Node. No supported flags are currently defined. +enum xnn_status xnn_define_maximum2( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Minimum Node and add it to a Subgraph. +/// +/// The 2-Input Minimum Node computes elementwise minimum of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Minimum Node. No supported flags are currently defined. +enum xnn_status xnn_define_minimum2( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Squared Difference Node and add it to a Subgraph. +/// +/// The Squared Difference Node computes elementwise squared difference of two tensor inputs with numpy broadcasting +/// rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Squared Difference Node. No supported flags are currently defined. +enum xnn_status xnn_define_squared_difference( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Constant Pad Node with static padding specification and add it to a Subgraph. 
+/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param pre_paddings - number of padding elements to insert before input elements for every dimension. This array +/// must have as many elements as the number of dimensions in the input tensor. +/// @param post_paddings - number of padding elements to insert after input elements for every dimension. This array +/// must have as many elements as the number of dimensions in the input tensor. +/// @param padding_value - constant value used to initialize padding elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor with padding. +/// @param flags - binary features of the Constant Pad Node. No supported flags are currently defined. +enum xnn_status xnn_define_static_constant_pad( + xnn_subgraph_t subgraph, + const size_t* pre_paddings, + const size_t* post_paddings, + float padding_value, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Mean Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param num_reduction_axes - number of axes along which mean is computed. +/// @param reduction_axes - axes along which mean is computed. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with at least +/// @a num_reduction_axes dimensions defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor defined in the +/// @a subgraph with @a num_reduction_axes fewer dimensions than the input tensor (if +/// XNN_FLAG_REDUCE_DIMS is specified), or has same dimension rank but the dimension at +/// @a reduction_axes reduced to 1 (if XNN_FLAG_REDUCE_DIMS is not specified). +/// @param flags - binary features of the Mean Node. The only currently supported value is XNN_FLAG_REDUCE_DIMS +enum xnn_status xnn_define_static_mean( + xnn_subgraph_t subgraph, + size_t num_reduction_axes, + const size_t* reduction_axes, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Concatenate Node and add it to a Subgraph. +/// +/// The 2-Input Concatenate Node concatenates two tensors along a specified axis. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param axis - the axis to concatenate the two input tensors along +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// second input. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// first input. +/// @param output_id - Value ID for the output tensor. The output tensor must be a N-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the dimension of both inputs, except the axis +/// dimension, where it is the sum of the corresponding dimensions of both inputs. +/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined. 
+enum xnn_status xnn_define_concatenate2( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 3-Input Concatenate Node and add it to a Subgraph. +/// +/// The 3-Input Concatenate Node concatenates three tensors along a specified axis. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param axis - the axis to concatenate the three input tensors along +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param output_id - Value ID for the output tensor. The output tensor must be a N-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis +/// dimension, where it is the sum of the corresponding dimensions of all inputs. +/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined. +enum xnn_status xnn_define_concatenate3( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 4-Input Concatenate Node and add it to a Subgraph. +/// +/// The 4-Input Concatenate Node concatenates four tensors along a specified axis. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param axis - the axis to concatenate the four input tensors along +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input4_id - Value ID for the fourth input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param output_id - Value ID for the output tensor. The output tensor must be a N-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis +/// dimension, where it is the sum of the corresponding dimensions of all inputs. +/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined. 
+enum xnn_status xnn_define_concatenate4( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t input4_id, + uint32_t output_id, + uint32_t flags); + +enum xnn_status xnn_define_concatenate5( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t input4_id, + uint32_t input5_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Copy Node and add it to a Subgraph. +/// +/// The Copy Node copies an input tensor to an output tensor. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the first input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Copy Node. No supported flags are currently defined. +enum xnn_status xnn_define_copy( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Output Split Node and add it to a Subgraph. +/// +/// The 2-Output Split Node splits an input tensor into two output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the second output. The split_dim dimension is half of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the first output. The split_dim dimension is half of the input's split_dim. +/// @param flags - binary features of the Split Node. No supported flags are currently defined. +enum xnn_status xnn_define_even_split2( + xnn_subgraph_t subgraph, + size_t split_dim, + uint32_t input_id, + uint32_t output1_id, + uint32_t output2_id, + uint32_t flags); + +/// Define a 3-Output Split Node and add it to a Subgraph. +/// +/// The 3-Output Split Node splits an input tensor into three output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the second and third output. The split_dim dimension is one third of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the first and third output. 
The split_dim dimension is one third of the input's
+/// split_dim.
+/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the first and second output. The split_dim dimension is one third of the input's
+/// split_dim.
+/// @param flags - binary features of the Split Node. No supported flags are currently defined.
+enum xnn_status xnn_define_even_split3(
+  xnn_subgraph_t subgraph,
+  size_t split_dim,
+  uint32_t input_id,
+  uint32_t output1_id,
+  uint32_t output2_id,
+  uint32_t output3_id,
+  uint32_t flags);
+
+/// Define a 4-Output Split Node and add it to a Subgraph.
+///
+/// The 4-Output Split Node splits an input tensor into four output tensors along a specified axis evenly.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param split_dim - the dimension to split the input tensor along
+/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a
+/// subgraph.
+/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension
+/// of the other output tensors. The split_dim dimension is one fourth of the input's split_dim.
+/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
+/// split_dim.
+/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
+/// split_dim.
+/// @param output4_id - Value ID for the fourth output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
+/// split_dim.
+/// @param flags - binary features of the Split Node. No supported flags are currently defined.
+enum xnn_status xnn_define_even_split4(
+  xnn_subgraph_t subgraph,
+  size_t split_dim,
+  uint32_t input_id,
+  uint32_t output1_id,
+  uint32_t output2_id,
+  uint32_t output3_id,
+  uint32_t output4_id,
+  uint32_t flags);
+
+/// Define a Reshape Node with static shape specification and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param num_dims - number of shape dimensions in the output tensor.
+/// @param new_shape - shape dimensions of the output tensor.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the @a new_shape dimensions.
+/// @param flags - binary features of the Reshape Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_reshape(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* new_shape,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Node that reshapes a tensor to two dimensions, retaining the
+/// trailing dimension, and add it to a Subgraph.
+///
+/// This operator is experimental.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be
+/// defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be
+/// defined in the @a subgraph, and its
+/// size must match the size of the input tensor.
+/// @param flags - binary features of the Reshape Node. No supported flags are
+/// currently defined.
+enum xnn_status xnn_define_reshape_2d(xnn_subgraph_t subgraph,
+                                      uint32_t input_id, uint32_t output_id,
+                                      uint32_t flags);
+
+/// Define a 2D Resize Bilinear Node with static output height & width specification and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param new_height - height dimension of the output tensor.
+/// @param new_width - width dimension of the output tensor.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, H, W, C] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, new_height, new_width, C] dimensions.
+/// @param flags - binary features of the 2D Resize Bilinear Node. The only currently supported values are
+/// XNN_FLAG_TENSORFLOW_LEGACY_MODE and XNN_FLAG_ALIGN_CORNERS, which are mutually exclusive.
+enum xnn_status xnn_define_static_resize_bilinear_2d(
+  xnn_subgraph_t subgraph,
+  size_t new_height,
+  size_t new_width,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a PReLU (Parametric ReLU) Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, H, W, channels] dimensions.
+/// @param slope_id - Value ID for the slope tensor. The slope tensor must be a 1D tensor defined in the @a subgraph with
+/// [channels] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, H, W, channels] dimensions.
+/// @param flags - binary features of the PReLU Node. No supported flags are currently defined.
+enum xnn_status xnn_define_prelu(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t slope_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a RoPE (Rotary Positional Embeddings) Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param max_sequence_size - maximum possible number of tokens (maximum sequence length) of the input/output tensors.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [batch, tokens, heads, channels] dimensions.
+/// @param weights_id - Value ID for the weights tensor. The weights tensor must be a 2D tensor defined in the
+/// @a subgraph with [max_sequence_size, channels] dimensions.
+/// @param output_id - Value ID for the output tensor.
The output tensor must be a 4D tensor defined in the @a subgraph +/// with [batch, tokens, heads, channels] dimensions. +/// @param flags - binary features of the RoPE Node. No supported flags are currently defined. +enum xnn_status xnn_define_rope( + xnn_subgraph_t subgraph, + size_t max_sequence_size, + uint32_t input_id, + uint32_t weights_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Abs Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Abs Node. No supported flags are currently defined. +enum xnn_status xnn_define_abs( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Bankers' Rounding Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Bankers' Rounding Node. No supported flags are currently defined. +enum xnn_status xnn_define_bankers_rounding( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Batch Matrix Multiply Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph. It must be at least 3D. The first N-2 dimensions must match the second input +/// tensor. The last 2 dimensions are [M, K]. If XNN_FLAG_TRANSPOSE_B is not specified, the last +/// dimension must match the second last dimension of the second input tensor. If +/// XNN_FLAG_TRANSPOSE_B is specified, the last dimension must match the last dimension of the +/// second input tensor. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined +/// in the @a subgraph. It must be at least 3D. The first N-2 dimensions must match the first input +/// tensor. If XNN_FLAG_TRANSPOSE_B is not specified, the last 2 dimensions are [K, N], and the +/// second last dimension must match the last dimension of the first input tensor. If +/// XNN_FLAG_TRANSPOSE_B is specified, the last 2 dimensions are [N, K], and the last dimension must +/// match the last dimension of the first input tensor. +/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined in the +/// @a subgraph. It must be at least 3D. The first N-2 dimensions must match the first and second +/// input tensors . The last 2 dimensions must be [M, N]. +/// @param flags - binary features of the Batch Matrix Multiply Node. The only currently supported value is +/// XNN_FLAG_TRANSPOSE_B. +enum xnn_status xnn_define_batch_matrix_multiply( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Ceiling Node and add it to a Subgraph. 
+/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Ceiling Node. No supported flags are currently defined. +enum xnn_status xnn_define_ceiling( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Clamp Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Clamp Node. No supported flags are currently defined. +enum xnn_status xnn_define_clamp( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define an ELU (Exponential Linear Unit) Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param alpha - scale factor for negative output elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the ELU Node. No supported flags are currently defined. +enum xnn_status xnn_define_elu( + xnn_subgraph_t subgraph, + float alpha, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Floor Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Floor Node. No supported flags are currently defined. +enum xnn_status xnn_define_floor( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a HardSwish Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the HardSwish Node. No supported flags are currently defined. +enum xnn_status xnn_define_hardswish( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Leaky ReLU Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. 
+/// @param negative_slope - scale factor for negative input elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Leaky ReLU Node. No supported flags are currently defined. +enum xnn_status xnn_define_leaky_relu( + xnn_subgraph_t subgraph, + float negative_slope, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Negate Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Negate Node. No supported flags are currently defined. +enum xnn_status xnn_define_negate( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Sigmoid Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Sigmoid Node. No supported flags are currently defined. +enum xnn_status xnn_define_sigmoid( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a SoftMax Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph, and have at +/// least one dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the SoftMax Node. No supported flags are currently defined. +enum xnn_status xnn_define_softmax( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Space To Depth 2D Node and add it to a Subgraph. +/// +/// The Space To Depth 2D Node rearranges blocks of spatial data into blocks (a reverse transform to Depth To Space 2D). +/// For a given input pixel, an output square of pixels with side @a block_size is formed from values in the +/// corresponding number of its channels. The output depth is therefore @a block_size x @a block_size times greater +/// than that of the input. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param block_size - the size of the spatial block. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH * block_size, IW * block_size, OC] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, OC * block_size * block_size] dimensions. 
+/// @param flags - binary features of the Space To Depth 2D Node. No supported flags are currently defined.
+enum xnn_status xnn_define_space_to_depth_2d(
+  xnn_subgraph_t subgraph,
+  uint32_t block_size,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Square Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Square Node. No supported flags are currently defined.
+enum xnn_status xnn_define_square(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Square Root Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Square Root Node. No supported flags are currently defined.
+enum xnn_status xnn_define_square_root(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Reciprocal Square Root Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be
+/// defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be
+/// defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Reciprocal Square Root Node. No supported flags
+/// are currently defined.
+enum xnn_status xnn_define_reciprocal_square_root(xnn_subgraph_t subgraph,
+                                                  uint32_t input_id,
+                                                  uint32_t output_id,
+                                                  uint32_t flags);
+
+/// Define a Static Slice Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param num_dims - number of shape dimensions in the input and output tensor.
+/// @param offsets - offsets in each dimension of the input tensor. This array must have @a num_dims elements.
+/// @param sizes - size of each dimension in output tensor. This array must have @a num_dims elements.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// dimensions must match @a sizes.
+/// @param flags - binary features of the Static Slice Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_slice(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* offsets,
+  const size_t* sizes,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Static Transpose Node and add it to a Subgraph.
+///
+/// The Static Transpose Node applies a generalized transpose to the input tensor using the permutation in perm.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor.
The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to its corresponding permuted input dimension.
+/// @param num_dims - the number of permutation dimensions. This must be equal to the number of input dimensions.
+/// @param perm - The permutation of the axes of the input tensor. The perm array must contain 0 to N-1 in the
+/// permuted order.
+/// @param flags - binary features of the Static Transpose Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_transpose(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* perm,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Tanh Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Tanh Node. No supported flags are currently defined.
+enum xnn_status xnn_define_tanh(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Code cache is a cache for JIT generated code.
+typedef struct xnn_code_cache* xnn_code_cache_t;
+
+/// Weights cache can be finalized in these ways:
+enum xnn_weights_cache_finalization_kind {
+  /// Weights cache is finalized, and no insert operations into the weights cache are allowed, even if the "inserted"
+  /// weights already exist in the cache. Weights cache memory will also be trimmed to page boundary and set to
+  /// read-only (to prevent writes).
+  xnn_weights_cache_finalization_kind_hard,
+  /// Weights cache will be finalized with some extra space at the end; this allows for "inserting" into the cache only
+  /// if the weights are already in the cache, and errors on inserting uncached weights. There is memory overhead.
+  xnn_weights_cache_finalization_kind_soft,
+};
+
+/// A combination of multiple factors used to uniquely locate packed weights in the weights cache.
+struct xnn_weights_cache_look_up_key {
+  /// The unique seed for each ukernel. It is guaranteed that each ukernel provides
+  /// a consistent and identical seed.
+  uint32_t seed;
+  /// Pointer to the original kernel.
+  const void* kernel;
+  /// Pointer to the original bias, may be NULL.
+  const void* bias;
+};
+
+/// A group of function pointers to manage the weights cache. All functions may be
+/// called from multiple threads.
+struct xnn_weights_cache_provider {
+  /// User-specified pointer that will be passed as-is to all functions in this
+  /// structure.
+  void* context;
+
+  /// Looks up the tuple of {cache_key, kernel, bias} in the cache. If it is found,
+  /// returns the offset to the found entry for reuse. Otherwise, returns SIZE_MAX.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param cache_key - The key used to locate the weights cache entry.
+  size_t (*look_up)(void* context, const struct xnn_weights_cache_look_up_key* cache_key);
+
+  /// Ensures that the cache has enough space for `n` bytes. Returns the address where the
+  /// packed weights can be stored. Returns NULL if it fails to reserve space.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param n - size to be reserved.
+  void* (*reserve_space)(void* context, size_t n);
+
+  /// Looks up packed weights at `ptr` in the cache. If they are found, they are reused.
+  /// Otherwise, they are added to the cache. Returns the offset to the entry in the cache.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param cache_key - The key used to locate the weights cache entry.
+  /// @param ptr - pointer to the packed weights.
+  /// @param size - size of the packed weights.
+  size_t (*look_up_or_insert)(void* context, const struct xnn_weights_cache_look_up_key* cache_key, void* ptr, size_t size);
+
+  /// Returns whether the cache is finalized.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  bool (*is_finalized)(void* context);
+
+  /// Returns the absolute pointer corresponding to `offset`, where the offset is returned from
+  /// `look_up` or `look_up_or_insert`. This function must be called after the cache is finalized.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param offset - offset relative to the start of the internal buffer
+  void* (*offset_to_addr)(void* context, size_t offset);
+
+  /// Destroy a weights cache object, as well as memory used for the cache.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  enum xnn_status (*delete_cache)(void* context);
+};
+
+/// Weights cache is a cache for packed weights. It can be reused between runtimes.
+typedef struct xnn_weights_cache_provider* xnn_weights_cache_t;
+
+/// Create a weights cache object specifying the initial size of the weights cache (in bytes).
+///
+/// @param[in] size - initial capacity of the weights cache (in bytes), i.e. it can hold size bytes without growing.
+/// @param weights_cache_out - pointer to the variable that will be initialized to a handle to the weights cache provider
+/// upon successful return. Once created, the weights cache provider can be shared between
+/// different Runtime objects.
+enum xnn_status xnn_create_weights_cache_with_size(size_t size, xnn_weights_cache_t* weights_cache_out);
+
+enum xnn_status xnn_create_weights_cache(xnn_weights_cache_t* weights_cache_out);
+
+/// Finalizes the weights cache. The kind of finalization is specified by `finalization_kind`.
+/// @param weights_cache - the weights cache object to finalize.
+/// @param finalization_kind - the kind of finalization.
+enum xnn_status xnn_finalize_weights_cache(
+  xnn_weights_cache_t weights_cache,
+  enum xnn_weights_cache_finalization_kind finalization_kind);
+
+/// Destroy a weights cache object, as well as memory used for the cache.
+/// @param weights_cache - the weights cache object to destroy.
+enum xnn_status xnn_delete_weights_cache(xnn_weights_cache_t weights_cache);
+
+typedef struct xnn_workspace* xnn_workspace_t;
+
+/// Create a workspace object.
+/// @param workspace_out - pointer to the variable that will be initialized to a handle to the workspace object upon
+/// successful return. Once created, the workspace can be shared between different Runtime
+/// objects.
+enum xnn_status xnn_create_workspace(xnn_workspace_t* workspace_out);
+
+/// Destroy a workspace object, as well as memory used by the workspace. Object destruction can be deferred until all
+/// Runtime objects created with this workspace are destroyed.
+/// @param workspace - the workspace object to destroy.
+enum xnn_status xnn_release_workspace(xnn_workspace_t workspace); + +/// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values. +typedef struct xnn_runtime* xnn_runtime_t; + +enum xnn_profile_info { + /// Returns a size_t containing the number of operators. + xnn_profile_info_num_operators, + /// Returns a char[] containing the null character separated names of all operators. + xnn_profile_info_operator_name, + /// Returns a uint64_t[] with the runtimes of all operators in the same order as xnn_profile_info_operator_name. + xnn_profile_info_operator_timing, +}; + +/// Return profile information for all operators. +/// +/// @param runtime - a Runtime object created with @ref xnn_create_runtime, @ref xnn_create_runtime_v2 or +/// @ref xnn_create_runtime_v3. +/// @param param_name - type of profile information required. +/// @param param_value_size - the size in bytes of memory pointed to by param_value. If this is not sufficient then +/// param_value_size_ret will be set to the required size and xnn_status_out_of_memory will be +/// returned. +/// @param param_value - a pointer to memory location where appropriate values for a given param_value will be written. +/// @param param_value_size_ret - returns number of bytes required to write the result if param_value_size is not +/// sufficient. +enum xnn_status xnn_get_runtime_profiling_info(xnn_runtime_t runtime, + enum xnn_profile_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +/// Create a Runtime object from a subgraph. +/// +/// @param subgraph - a Subgraph object with all Values and Nodes that would be handled by the runtime. No Values or +/// Nodes can be added to the runtime once it is constructed. +/// @param weights_cache - a cache for packed weights. The runtime will look up and reuse packed weights in this cache, +/// this will reduce memory allocated for packed weights. +/// @param workspace - a workspace to hold internal tensors. The runtime will allocate space used for internal tensors +/// and track them using workspace. Workspace can be shared and reused across different runtimes. If +/// workspace is NULL, there will be no sharing: each runtime has its own workspace. +/// @param threadpool - the thread pool to be used for parallelisation of computations in the runtime. If the thread +/// pool is NULL, the computation would run on the caller thread without parallelization. +/// @param flags - binary features of the runtime. The only currently supported values are +/// XNN_FLAG_HINT_SPARSE_INFERENCE, XNN_FLAG_HINT_FP16_INFERENCE, XNN_FLAG_FORCE_FP16_INFERENCE, +/// XNN_FLAG_YIELD_WORKERS, and XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER. If XNN_FLAG_YIELD_WORKERS is +/// specified, worker threads would be yielded to the system scheduler after processing the last operator +/// in the Runtime. If XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER is specified, convolution operators will +/// initialize indirection buffers on each inference run using temporary memory in the workspace, instead +/// of initializing persistent indirection buffers once. +/// @param runtime_out - pointer to the variable that will be initialized with a handle to the Runtime object upon +/// successful return. Once constructed, the Runtime object is independent of the Subgraph object +/// used to create it. 
+enum xnn_status xnn_create_runtime_v4(
+  xnn_subgraph_t subgraph,
+  xnn_weights_cache_t weights_cache,
+  xnn_workspace_t workspace,
+  pthreadpool_t threadpool,
+  uint32_t flags,
+  xnn_runtime_t* runtime_out);
+
+enum xnn_status xnn_create_runtime_v3(
+  xnn_subgraph_t subgraph,
+  xnn_weights_cache_t weights_cache,
+  pthreadpool_t threadpool,
+  uint32_t flags,
+  xnn_runtime_t* runtime_out);
+
+enum xnn_status xnn_create_runtime_v2(
+  xnn_subgraph_t subgraph,
+  pthreadpool_t threadpool,
+  uint32_t flags,
+  xnn_runtime_t* runtime_out);
+
+enum xnn_status xnn_create_runtime(
+  xnn_subgraph_t subgraph,
+  xnn_runtime_t* runtime_out);
+
+struct xnn_external_value {
+  uint32_t id;
+  void* data;
+};
+
+/// Reshape an external value.
+///
+/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
+/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
+/// created for the Value.
+/// @param num_dims - number of dimensions in the shape.
+/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
+/// XNNPACK does not keep any pointers to this array after the function returns.
+enum xnn_status xnn_reshape_external_value(
+  xnn_runtime_t runtime,
+  uint32_t external_id,
+  size_t num_dims,
+  const size_t* dims);
+
+/// Get the external value shape.
+///
+/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
+/// the Subgraph creation. The external ID cannot be XNN_INVALID_VALUE_ID.
+/// @param num_dims - A valid pointer into which the number of dimensions in the shape will be written. It cannot be larger than XNN_MAX_TENSOR_DIMS.
+/// @param dims - pointer to an array of @a num_dims shape dimensions. This pointer can't be NULL. It must be large enough to hold
+/// at least @a num_dims elements. XNNPACK does not keep any pointers to this array after the function returns.
+enum xnn_status xnn_get_external_value_shape(
+  xnn_runtime_t runtime,
+  uint32_t external_id,
+  size_t* num_dims,
+  size_t* dims);
+
+/// Reshape the XNNPACK runtime.
+///
+/// Propagates the shapes of input tensors through the graph to determine the shapes of intermediate and output tensors.
+/// Memory is allocated if required. Output tensor shapes are returned by xnn_get_external_value_shape.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
+enum xnn_status xnn_reshape_runtime(
+  xnn_runtime_t runtime);
+
+/// Deprecated. Use xnn_reshape_runtime and xnn_setup_runtime_v2.
+///
+/// Setup data pointers for external inputs and outputs in a Runtime object and
+/// allocate memory.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
+/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must
+/// match the number of external inputs and outputs in the runtime, i.e. all external
+/// inputs and outputs in the runtime must be specified in one call.
+/// @param external_values - array with location information for all external inputs and outputs in the runtime.
+enum xnn_status xnn_setup_runtime(
+  xnn_runtime_t runtime,
+  size_t num_external_values,
+  const struct xnn_external_value* external_values);
+
+/// Setup data pointers for external inputs and outputs in a Runtime object.
+/// Should be called after xnn_reshape_runtime.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
+/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must
+/// match the number of external inputs and outputs in the runtime, i.e. all external
+/// inputs and outputs in the runtime must be specified in one call.
+/// @param external_values - array with location information for all external inputs and outputs in the runtime.
+enum xnn_status xnn_setup_runtime_v2(
+  xnn_runtime_t runtime,
+  size_t num_external_values,
+  const struct xnn_external_value* external_values);
+
+/// Execute forward pass for all operators in the runtime.
+///
+/// @param runtime - the Runtime object with the execution plan to invoke.
+enum xnn_status xnn_invoke_runtime(
+  xnn_runtime_t runtime);
+
+/// Destroy a Runtime object, as well as operators and memory associated with it.
+///
+/// @param runtime - the Runtime object to destroy.
+enum xnn_status xnn_delete_runtime(
+  xnn_runtime_t runtime);
+
+typedef struct xnn_operator* xnn_operator_t;
+
+enum xnn_status xnn_run_operator(
+  xnn_operator_t op,
+  pthreadpool_t threadpool);
+
+enum xnn_status xnn_delete_operator(
+  xnn_operator_t op);
+
+
+/// Operator API:
+/// - create operator will create and populate an xnn_operator_t
+/// - reshape operator will update fields in xnn_operator_t with shape/dimensions and parallelization information
+/// - setup operator will update pointers to inputs and outputs
+/// Each supported operator must have a create, reshape, and setup function. (Optionally a run function.)
+/// Operators listed below are in alphabetical order by operator name; within each operator, we sort alphabetically by
+/// data layout and type. We also group create, reshape, setup (and optionally run) functions of each operator together.
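+
+/// A minimal end-to-end usage sketch of the Subgraph and Runtime APIs above: two external FP32 inputs are subtracted
+/// into one external output. This is an illustrative sketch, not normative documentation; it assumes that
+/// xnn_initialize(), xnn_create_subgraph(), xnn_define_tensor_value(), xnn_delete_subgraph(), xnn_datatype_fp32 and
+/// the XNN_VALUE_FLAG_EXTERNAL_INPUT / XNN_VALUE_FLAG_EXTERNAL_OUTPUT flags are declared earlier in this header, and
+/// it collapses error handling into asserts for brevity.
+///
+/// @code{.c}
+///   #include <assert.h>
+///   #include <math.h>    // INFINITY
+///   #include <stddef.h>
+///   #include <stdint.h>
+///   // ...plus this header, which declares the XNNPACK APIs used below.
+///
+///   void subtract_example(float* a, float* b, float* out) {
+///     assert(xnn_initialize(/*allocator=*/NULL) == xnn_status_success);
+///
+///     // Reserve three external Value IDs: two inputs and one output.
+///     xnn_subgraph_t subgraph = NULL;
+///     assert(xnn_create_subgraph(/*external_value_ids=*/3, /*flags=*/0, &subgraph) == xnn_status_success);
+///
+///     const size_t dims[2] = {1, 16};
+///     uint32_t a_id = 0, b_id = 1, out_id = 2;
+///     assert(xnn_define_tensor_value(subgraph, xnn_datatype_fp32, 2, dims, /*data=*/NULL,
+///                                    /*external_id=*/a_id, XNN_VALUE_FLAG_EXTERNAL_INPUT, &a_id) == xnn_status_success);
+///     assert(xnn_define_tensor_value(subgraph, xnn_datatype_fp32, 2, dims, /*data=*/NULL,
+///                                    /*external_id=*/b_id, XNN_VALUE_FLAG_EXTERNAL_INPUT, &b_id) == xnn_status_success);
+///     assert(xnn_define_tensor_value(subgraph, xnn_datatype_fp32, 2, dims, /*data=*/NULL,
+///                                    /*external_id=*/out_id, XNN_VALUE_FLAG_EXTERNAL_OUTPUT, &out_id) == xnn_status_success);
+///
+///     // Elementwise subtraction with no output clamping.
+///     assert(xnn_define_subtract(subgraph, -INFINITY, INFINITY, a_id, b_id, out_id, /*flags=*/0) == xnn_status_success);
+///
+///     // NULL weights cache and NULL threadpool: no weight sharing, computation on the calling thread.
+///     xnn_runtime_t runtime = NULL;
+///     assert(xnn_create_runtime_v3(subgraph, /*weights_cache=*/NULL, /*threadpool=*/NULL, /*flags=*/0, &runtime)
+///            == xnn_status_success);
+///
+///     // Propagate shapes, bind the external data pointers, and run.
+///     assert(xnn_reshape_runtime(runtime) == xnn_status_success);
+///     const struct xnn_external_value externals[3] = {{a_id, a}, {b_id, b}, {out_id, out}};
+///     assert(xnn_setup_runtime_v2(runtime, 3, externals) == xnn_status_success);
+///     assert(xnn_invoke_runtime(runtime) == xnn_status_success);
+///
+///     assert(xnn_delete_runtime(runtime) == xnn_status_success);
+///     assert(xnn_delete_subgraph(subgraph) == xnn_status_success);
+///   }
+/// @endcode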
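+
+/// A sketch of sharing packed weights and workspace memory between Runtime objects via
+/// xnn_create_weights_cache_with_size, xnn_create_workspace and xnn_create_runtime_v4 declared above. The point at
+/// which the cache is finalized is one reasonable choice rather than a requirement: soft finalization still lets later
+/// Runtimes reuse packed weights that are already in the cache. `subgraph` is assumed to be a previously constructed
+/// xnn_subgraph_t, and error handling is omitted for brevity.
+///
+/// @code{.c}
+///   void shared_cache_example(xnn_subgraph_t subgraph) {
+///     xnn_weights_cache_t weights_cache = NULL;
+///     xnn_workspace_t workspace = NULL;
+///     xnn_create_weights_cache_with_size(/*size=*/1 << 20, &weights_cache);
+///     xnn_create_workspace(&workspace);
+///
+///     // The first Runtime packs its weights and populates the cache.
+///     xnn_runtime_t runtime1 = NULL;
+///     xnn_create_runtime_v4(subgraph, weights_cache, workspace, /*threadpool=*/NULL, /*flags=*/0, &runtime1);
+///
+///     // Soft finalization: later insertions succeed only for weights that are already cached.
+///     xnn_finalize_weights_cache(weights_cache, xnn_weights_cache_finalization_kind_soft);
+///
+///     // A second Runtime (e.g. for another inference thread) reuses the packed weights and the workspace.
+///     xnn_runtime_t runtime2 = NULL;
+///     xnn_create_runtime_v4(subgraph, weights_cache, workspace, /*threadpool=*/NULL, /*flags=*/0, &runtime2);
+///
+///     // ...reshape, setup, and invoke each Runtime as in the previous example...
+///
+///     xnn_delete_runtime(runtime2);
+///     xnn_delete_runtime(runtime1);
+///     xnn_delete_weights_cache(weights_cache);
+///     xnn_release_workspace(workspace);
+///   }
+/// @endcode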
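+
+/// A minimal sketch of the create/reshape/setup lifecycle described above, using the F32 Abs operator in NC layout
+/// declared below. xnn_initialize() is assumed to be declared earlier in this header; a NULL threadpool runs the
+/// computation on the calling thread, and error handling is reduced to asserts for brevity.
+///
+/// @code{.c}
+///   #include <assert.h>
+///   #include <stddef.h>
+///   // ...plus this header, which declares the XNNPACK APIs used below.
+///
+///   void abs_example(const float* input, float* output, size_t batch_size, size_t channels) {
+///     assert(xnn_initialize(/*allocator=*/NULL) == xnn_status_success);
+///
+///     xnn_operator_t abs_op = NULL;
+///     assert(xnn_create_abs_nc_f32(/*flags=*/0, &abs_op) == xnn_status_success);
+///
+///     // Attach shape and parallelization information, then bind the data pointers.
+///     assert(xnn_reshape_abs_nc_f32(abs_op, batch_size, channels, /*input_stride=*/channels,
+///                                   /*output_stride=*/channels, /*threadpool=*/NULL) == xnn_status_success);
+///     assert(xnn_setup_abs_nc_f32(abs_op, input, output) == xnn_status_success);
+///
+///     assert(xnn_run_operator(abs_op, /*threadpool=*/NULL) == xnn_status_success);
+///     assert(xnn_delete_operator(abs_op) == xnn_status_success);
+///   }
+/// @endcode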
+ +enum xnn_status xnn_create_abs_nc_f16( + uint32_t flags, + xnn_operator_t* abs_op_out); + +enum xnn_status xnn_reshape_abs_nc_f16( + xnn_operator_t abs_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_abs_nc_f16( + xnn_operator_t abs_op, + const void* input, + void* output); + +enum xnn_status xnn_create_abs_nc_f32( + uint32_t flags, + xnn_operator_t* abs_op_out); + +enum xnn_status xnn_reshape_abs_nc_f32( + xnn_operator_t abs_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_abs_nc_f32( + xnn_operator_t abs_op, + const float* input, + float* output); + +enum xnn_status xnn_run_abs_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_f16( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_f16( + xnn_operator_t add_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_add_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_f32( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_f32( + xnn_operator_t add_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_add_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_qs8( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_qs8( + xnn_operator_t add_op, + const int8_t* input1, + const int8_t* input2, + int8_t* output); + +enum xnn_status xnn_run_add_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + 
xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_qu8( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_qu8( + xnn_operator_t add_op, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output); + +enum xnn_status xnn_run_add_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_argmax_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t flags, + xnn_operator_t* argmax_pooling_op_out); + +enum xnn_status xnn_reshape_argmax_pooling2d_nhwc_f32( + xnn_operator_t argmax_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_argmax_pooling2d_nhwc_f32( + xnn_operator_t argmax_pooling_op, + void* workspace, + const float* input, + float* output, + uint32_t* index); + +enum xnn_status xnn_create_average_pooling2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_reshape_average_pooling2d_nhwc_f16( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_f16( + xnn_operator_t average_pooling_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_average_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_reshape_average_pooling2d_nhwc_f32( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_f32( + xnn_operator_t average_pooling_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status 
xnn_create_average_pooling2d_nhwc_qu8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_reshape_average_pooling2d_nhwc_qu8( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_qu8( + xnn_operator_t average_pooling_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_bankers_rounding_nc_f16( + uint32_t flags, + xnn_operator_t* rounding_op_out); + +enum xnn_status xnn_reshape_bankers_rounding_nc_f16( + xnn_operator_t rounding_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_bankers_rounding_nc_f16( + xnn_operator_t rounding_op, + const void* input, + void* output); + +enum xnn_status xnn_create_bankers_rounding_nc_f32( + uint32_t flags, + xnn_operator_t* rounding_op_out); + +enum xnn_status xnn_reshape_bankers_rounding_nc_f32( + xnn_operator_t rounding_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_bankers_rounding_nc_f32( + xnn_operator_t rounding_op, + const float* input, + float* output); + +enum xnn_status xnn_run_bankers_rounding_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_batch_matrix_multiply_nc_f16( + uint32_t flags, + xnn_operator_t* batch_matrix_multiply_op); + +enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f16( + xnn_operator_t batch_matrix_multiply_op, + size_t batch_size, + size_t m, + size_t k, + size_t n, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_batch_matrix_multiply_nc_f16( + xnn_operator_t batch_matrix_multiply_op, + void* workspace, + const void* lhs_input, + const void* rhs_input, + void* output); + +enum xnn_status xnn_create_batch_matrix_multiply_nc_f32( + uint32_t flags, + xnn_operator_t* batch_matrix_multiply_op); + +enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f32( + xnn_operator_t batch_matrix_multiply_op, + size_t batch_size, + size_t m, + size_t k, + size_t n, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_batch_matrix_multiply_nc_f32( + xnn_operator_t batch_matrix_multiply_op, + void* workspace, + const float* lhs_input, + const float* rhs_input, + float* output); + +enum xnn_status xnn_create_ceiling_nc_f16( + uint32_t flags, + xnn_operator_t* ceiling_op_out); + +enum xnn_status xnn_reshape_ceiling_nc_f16( + xnn_operator_t ceiling_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_ceiling_nc_f16( + xnn_operator_t ceiling_op, + const void* input, + void* output); + +enum xnn_status xnn_create_ceiling_nc_f32( + uint32_t flags, + xnn_operator_t* ceiling_op_out); + +enum xnn_status xnn_run_ceiling_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_reshape_ceiling_nc_f32( + xnn_operator_t ceiling_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_ceiling_nc_f32( + xnn_operator_t ceiling_op, + const float* input, + float* output); + +enum xnn_status xnn_create_channel_shuffle_nc_x8( + size_t groups, + size_t group_channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* channel_shuffle_op_out); + +enum xnn_status xnn_reshape_channel_shuffle_nc_x8( + xnn_operator_t channel_shuffle_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_channel_shuffle_nc_x8( + xnn_operator_t channel_shuffle_op, + const void* input, + void* output); + +enum xnn_status xnn_create_channel_shuffle_nc_x32( + size_t groups, + size_t group_channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* channel_shuffle_op_out); + +enum xnn_status xnn_reshape_channel_shuffle_nc_x32( + xnn_operator_t channel_shuffle_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_channel_shuffle_nc_x32( + xnn_operator_t channel_shuffle_op, + const void* input, + void* output); + +enum xnn_status xnn_create_clamp_nc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_f16( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_clamp_nc_f16( + xnn_operator_t clamp_op, + const void* input, + void* output); + +enum xnn_status xnn_create_clamp_nc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_f32( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_clamp_nc_f32( + xnn_operator_t clamp_op, + const float* input, + float* output); + +enum xnn_status xnn_run_clamp_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_clamp_nc_s8( + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_s8( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_clamp_nc_s8( + xnn_operator_t clamp_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_clamp_nc_u8( + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_u8( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_clamp_nc_u8( + xnn_operator_t clamp_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_constant_pad_nd_x8( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_reshape_constant_pad_nd_x8( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_constant_pad_nd_x8( + xnn_operator_t constant_pad_op, + const void* input, + void* output); + +enum xnn_status xnn_run_constant_pad_nd_x8( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_constant_pad_nd_x16( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_reshape_constant_pad_nd_x16( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_constant_pad_nd_x16( + xnn_operator_t constant_pad_op, + const void* input, + void* output); + +enum xnn_status xnn_run_constant_pad_nd_x16( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_constant_pad_nd_x32( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_reshape_constant_pad_nd_x32( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_constant_pad_nd_x32( + xnn_operator_t constant_pad_op, + const void* input, + void* output); + +enum xnn_status xnn_run_constant_pad_nd_x32( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f16_f32( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f16_f32( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f16_f32( + xnn_operator_t convert_op, + const void* input, + float* output); + +enum xnn_status xnn_run_convert_nc_f16_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const void* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f16_qd8( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f16_qd8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +// quantization_params must be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries. 
+enum xnn_status xnn_setup_convert_nc_f16_qd8( + xnn_operator_t convert_op, + const void* input, + int8_t* output, + struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_create_convert_nc_f32_qd8( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_qd8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +// quantization_params must be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries. +enum xnn_status xnn_setup_convert_nc_f32_qd8( + xnn_operator_t convert_op, + const float* input, + int8_t* output, + struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_create_convert_nc_f32_f16( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_f16( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f32_f16( + xnn_operator_t convert_op, + const float* input, + void* output); + +enum xnn_status xnn_run_convert_nc_f32_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + void* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_qs8( + float output_scale, + int8_t output_zero_point, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_qs8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f32_qs8( + xnn_operator_t convert_op, + const float* input, + int8_t* output); + +enum xnn_status xnn_run_convert_nc_f32_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + int8_t* output, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_qu8( + float output_scale, + uint8_t output_zero_point, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_qu8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f32_qu8( + xnn_operator_t convert_op, + const float* input, + uint8_t* output); + +enum xnn_status xnn_run_convert_nc_f32_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + uint8_t* output, + float output_scale, + uint8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qs8( + float input_scale, + int8_t input_zero_point, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs8( + xnn_operator_t convert_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_convert_nc_qs8_f16( + float input_scale, + int8_t 
input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs8_f16( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs8_f16( + xnn_operator_t convert_op, + const int8_t* input, + void* output); + +enum xnn_status xnn_create_convert_nc_qs8_f32( + float input_scale, + int8_t input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs8_f32( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs8_f32( + xnn_operator_t convert_op, + const int8_t* input, + float* output); + +enum xnn_status xnn_run_convert_nc_qs8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const int8_t* input, + float* output, + float input_scale, + int8_t input_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qs16_qs8( + float input_scale, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs16_qs8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs16_qs8( + xnn_operator_t convert_op, + const int16_t* input, + int8_t* output); + +enum xnn_status xnn_run_convert_nc_qs16_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const int16_t* input, + int8_t* output, + float input_scale, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qu8( + float input_scale, + uint8_t input_zero_point, + float output_scale, + uint8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qu8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qu8( + xnn_operator_t convert_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_convert_nc_qu8_f32( + float input_scale, + uint8_t input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qu8_f32( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qu8_f32( + xnn_operator_t convert_op, + const uint8_t* input, + float* output); + +enum xnn_status xnn_run_convert_nc_qu8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const uint8_t* input, + float* output, + float input_scale, + uint8_t input_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convolution2d_nchw_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, 
+ size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nchw_f16( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nchw_f16( + xnn_operator_t convolution_op, + const void* input, + void* output); + +enum xnn_status xnn_create_convolution2d_nchw_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nchw_f32( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nchw_f32( + xnn_operator_t convolution_op, + const float* input, + float* output); + +enum xnn_status xnn_create_convolution2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_f16( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_f16( + xnn_operator_t convolution_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_convolution2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* 
convolution_op_out); + +// Forward declare. +struct xnn_post_operation; + +/// Create a convolution operator with a number of post operations. The +/// convolution operator created using this function does not have output_min +/// and output_max. The list of operators in post_operations will be applied in +/// order. Convolution with post operations is only supported on JIT platforms +/// and when JIT is enabled. +enum xnn_status xnn_create_fused_convolution2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + size_t num_post_operations, + struct xnn_post_operation* post_operations, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_f32( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_f32( + xnn_operator_t convolution_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_convolution2d_nhwc_qd8_f16_qc8w( + uint32_t input_padding_top, uint32_t input_padding_right, + uint32_t input_padding_bottom, uint32_t input_padding_left, + uint32_t kernel_height, uint32_t kernel_width, uint32_t subsampling_height, + uint32_t subsampling_width, uint32_t dilation_height, + uint32_t dilation_width, uint32_t groups, size_t group_input_channels, + size_t group_output_channels, size_t input_channel_stride, + size_t output_channel_stride, const float* kernel_scale, + const int8_t* kernel, const float* bias, float output_min, float output_max, + uint32_t flags, xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_create_convolution2d_nhwc_qd8_f32_qc8w( + uint32_t input_padding_top, uint32_t input_padding_right, + uint32_t input_padding_bottom, uint32_t input_padding_left, + uint32_t kernel_height, uint32_t kernel_width, uint32_t subsampling_height, + uint32_t subsampling_width, uint32_t dilation_height, + uint32_t dilation_width, uint32_t groups, size_t group_input_channels, + size_t group_output_channels, size_t input_channel_stride, + size_t output_channel_stride, const float* kernel_scale, + const int8_t* kernel, const float* bias, float output_min, float output_max, + uint32_t flags, xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_create_convolution2d_nhwc_qs8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + int8_t input_zero_point, + float 
input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f16_qc8w( + xnn_operator_t convolution_op, size_t batch_size, size_t input_height, + size_t input_width, size_t* workspace_size, size_t* workspace_alignment, + size_t* output_height_out, size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f32_qc8w( + xnn_operator_t convolution_op, size_t batch_size, size_t input_height, + size_t input_width, size_t* workspace_size, size_t* workspace_alignment, + size_t* output_height_out, size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qs8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f16_qc8w( + xnn_operator_t convolution_op, void* workspace, const int8_t* input, + void* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f32_qc8w( + xnn_operator_t convolution_op, void* workspace, const int8_t* input, + float* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_setup_convolution2d_nhwc_qs8( + xnn_operator_t convolution_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_convolution2d_nhwc_qs8_qc8w( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + int8_t input_zero_point, + float input_scale, + const float* kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qs8_qc8w( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_qs8_qc8w( + xnn_operator_t convolution_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_convolution2d_nhwc_qu8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t 
output_channel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qu8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_qu8( + xnn_operator_t convolution_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_copy_nc_x8( + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_reshape_copy_nc_x8( + xnn_operator_t copy_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_copy_nc_x8( + xnn_operator_t copy_op, + const void* input, + void* output); + +enum xnn_status xnn_create_copy_nc_x16( + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_reshape_copy_nc_x16( + xnn_operator_t copy_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_copy_nc_x16( + xnn_operator_t copy_op, + const void* input, + void* output); + +enum xnn_status xnn_create_copy_nc_x32( + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_reshape_copy_nc_x32( + xnn_operator_t copy_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_copy_nc_x32( + xnn_operator_t copy_op, + const void* input, + void* output); + +enum xnn_status xnn_run_copy_nc_x32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const uint32_t* input, + uint32_t* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_deconvolution2d_nhwc_f16( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_f16( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_f16( + xnn_operator_t deconvolution_op, + const void* input, + void* output); + +enum xnn_status xnn_create_deconvolution2d_nhwc_f32( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t 
kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_f32( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_f32( + xnn_operator_t deconvolution_op, + const float* input, + float* output); + +enum xnn_status xnn_create_deconvolution2d_nhwc_qs8( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_qs8( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_qs8( + xnn_operator_t deconvolution_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_deconvolution2d_nhwc_qu8( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_qu8( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_qu8( + xnn_operator_t deconvolution_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x16( + uint32_t block_size, + uint32_t flags, + 
xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nchw2nhwc_x16( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x16( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x32( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nchw2nhwc_x32( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x32( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nhwc_x8( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nhwc_x8( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x8( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nhwc_x16( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nhwc_x16( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x16( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nhwc_x32( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nhwc_x32( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x32( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_divide_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* divide_op_out); + +enum xnn_status xnn_reshape_divide_nd_f16( + xnn_operator_t divide_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_divide_nd_f16( + xnn_operator_t divide_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_divide_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* divide_op_out); + +enum xnn_status xnn_reshape_divide_nd_f32( + xnn_operator_t divide_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* 
input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_divide_nd_f32( + xnn_operator_t divide_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_divide_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_dynamic_fully_connected_nc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* dynamic_fully_connected_op_out); + +enum xnn_status xnn_reshape_dynamic_fully_connected_nc_f16( + xnn_operator_t dynamic_fully_connected_op, + size_t batch_size, + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_dynamic_fully_connected_nc_f16( + xnn_operator_t dynamic_fully_connected_op, + void* workspace, + const void* input, + const void* kernel, + const void* bias, + void* output); + +enum xnn_status xnn_create_dynamic_fully_connected_nc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* dynamic_fully_connected_op_out); + +enum xnn_status xnn_reshape_dynamic_fully_connected_nc_f32( + xnn_operator_t dynamic_fully_connected_op, + size_t batch_size, + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_dynamic_fully_connected_nc_f32( + xnn_operator_t dynamic_fully_connected_op, + void* workspace, + const float* input, + const float* kernel, + const float* bias, + float* output); + +enum xnn_status xnn_create_elu_nc_f16( + float alpha, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_reshape_elu_nc_f16( + xnn_operator_t elu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_elu_nc_f16( + xnn_operator_t elu_op, + const void* input, + void* output); + +enum xnn_status xnn_create_elu_nc_f32( + float alpha, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_reshape_elu_nc_f32( + xnn_operator_t elu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_elu_nc_f32( + xnn_operator_t elu_op, + const float* input, + float* output); + +enum xnn_status xnn_run_elu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float alpha, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_elu_nc_qs8( + float alpha, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_reshape_elu_nc_qs8( + xnn_operator_t elu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_elu_nc_qs8( + xnn_operator_t elu_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_floor_nc_f16( + uint32_t flags, + xnn_operator_t* floor_op_out); + +enum xnn_status 
xnn_reshape_floor_nc_f16( + xnn_operator_t floor_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_floor_nc_f16( + xnn_operator_t floor_op, + const void* input, + void* output); + +enum xnn_status xnn_create_floor_nc_f32( + uint32_t flags, + xnn_operator_t* floor_op_out); + +enum xnn_status xnn_reshape_floor_nc_f32( + xnn_operator_t floor_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_floor_nc_f32( + xnn_operator_t floor_op, + const float* input, + float* output); + +enum xnn_status xnn_run_floor_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_f16( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f16( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f16( + xnn_operator_t fully_connected_op, + const void* input, + void* output); + +enum xnn_status xnn_create_fully_connected_nc_f32( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f32( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f32( + xnn_operator_t fully_connected_op, + const float* input, + float* output); + +enum xnn_status xnn_create_fully_connected_nc_f32_qc4w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t kernel_zero_point, + const float* kernel_scale, + const uint8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f32_qc4w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f32_qc4w( + xnn_operator_t fully_connected_op, + const float* input, + float* output); + +enum xnn_status xnn_create_fully_connected_nc_f32_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel_scale, + const int8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f32_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f32_qc8w( + xnn_operator_t fully_connected_op, + const float* input, + float* output); + 
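
The operators declared above all share one lifecycle: xnn_create_* fixes the static parameters (weights, clamping bounds, optional code/weights caches), xnn_reshape_* binds the runtime shape, and xnn_setup_* binds the input and output pointers before the operator is executed; the xnn_run_* entry points bundle the same steps into a single call for one-shot use. The sketch below is an illustration only and not part of the diffed header: it walks that lifecycle for the f32 fully-connected operator declared just above, and it assumes xnn_initialize, xnn_run_operator, xnn_delete_operator, and xnn_status_success from the same xnnpack.h, with zero-filled dummy buffers in place of real weights and data.

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <xnnpack.h>  /* the header whose declarations appear in this diff */

/* Illustrative sketch of the create -> reshape -> setup -> run -> delete
   lifecycle, using the f32 fully-connected operator declared above.
   Error handling is reduced to asserts for brevity. */
static void fully_connected_f32_example(void) {
  assert(xnn_initialize(/*allocator=*/NULL) == xnn_status_success);

  const size_t batch_size = 4, input_channels = 32, output_channels = 16;
  float* kernel = calloc(output_channels * input_channels, sizeof(float));
  float* bias   = calloc(output_channels, sizeof(float));
  float* input  = calloc(batch_size * input_channels, sizeof(float));
  float* output = calloc(batch_size * output_channels, sizeof(float));

  /* Create: static parameters (weights, clamping range, optional caches). */
  xnn_operator_t fc_op = NULL;
  assert(xnn_create_fully_connected_nc_f32(
             input_channels, output_channels,
             /*input_stride=*/input_channels, /*output_stride=*/output_channels,
             kernel, bias, /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
             /*flags=*/0, /*code_cache=*/NULL, /*weights_cache=*/NULL,
             &fc_op) == xnn_status_success);

  /* Reshape: bind the batch size (other operators also take spatial dims). */
  assert(xnn_reshape_fully_connected_nc_f32(fc_op, batch_size,
             /*threadpool=*/NULL) == xnn_status_success);

  /* Setup: bind input/output pointers, then run the prepared operator. */
  assert(xnn_setup_fully_connected_nc_f32(fc_op, input, output)
         == xnn_status_success);
  assert(xnn_run_operator(fc_op, /*threadpool=*/NULL) == xnn_status_success);

  xnn_delete_operator(fc_op);
  free(kernel); free(bias); free(input); free(output);
}
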
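For the *_qd8 convert operators declared earlier, the header comments require the quantization_params buffer to be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries. The short sketch below shows one way to size that buffer for xnn_setup_convert_nc_f32_qd8; it is an illustration only, and the one-entry-per-batch-row layout is an assumption on top of the padding requirement stated in the comment.

#include <assert.h>
#include <stdlib.h>
#include <xnnpack.h>

/* Hedged sketch: preparing the padded quantization_params buffer used by
   xnn_setup_convert_nc_f32_qd8 (see the header comment above). */
static void convert_f32_qd8_example(const float* input, int8_t* output,
                                    size_t batch_size, size_t channels) {
  xnn_operator_t convert_op = NULL;
  assert(xnn_create_convert_nc_f32_qd8(/*flags=*/0, &convert_op)
         == xnn_status_success);
  assert(xnn_reshape_convert_nc_f32_qd8(convert_op, batch_size, channels,
             /*input_stride=*/channels, /*output_stride=*/channels,
             /*threadpool=*/NULL) == xnn_status_success);

  /* Assumed layout: one entry per batch row, plus the padding entries the
     header comment mandates (XNN_EXTRA_QUANTIZATION_PARAMS). */
  struct xnn_dynamic_quantization_params* quantization_params =
      calloc(batch_size + XNN_EXTRA_QUANTIZATION_PARAMS,
             sizeof(struct xnn_dynamic_quantization_params));
  assert(xnn_setup_convert_nc_f32_qd8(convert_op, input, output,
             quantization_params) == xnn_status_success);
  assert(xnn_run_operator(convert_op, /*threadpool=*/NULL)
         == xnn_status_success);

  xnn_delete_operator(convert_op);
  free(quantization_params);
}
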
+enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qc4w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t kernel_zero_point, + const float* kernel_scale, + const void* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qc4w( + xnn_operator_t fully_connected_op, + const int8_t* input, + void* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qc4w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qc4w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t kernel_zero_point, + const float* kernel_scale, + const void* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qc4w( + xnn_operator_t fully_connected_op, + const int8_t* input, + float* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qc4w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel_scale, + const int8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qc8w( + xnn_operator_t fully_connected_op, + const int8_t* input, + void* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel_scale, + const int8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qc8w( + xnn_operator_t fully_connected_op, + const int8_t* input, + float* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qs8( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + 
xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_qs8( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_qs8( + xnn_operator_t fully_connected_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_fully_connected_nc_qs8_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + const float* kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_qs8_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_qs8_qc8w( + xnn_operator_t fully_connected_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_fully_connected_nc_qu8( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_qu8( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_qu8( + xnn_operator_t fully_connected_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_global_average_pooling_ncw_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_ncw_f16( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_ncw_f16( + xnn_operator_t global_average_pooling_op, + const void* input, + void* output); + +enum xnn_status xnn_create_global_average_pooling_ncw_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_ncw_f32( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_ncw_f32( + xnn_operator_t global_average_pooling_op, + const float* input, + float* output); + +enum xnn_status xnn_create_global_average_pooling_nwc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_f16( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_f16( + xnn_operator_t global_average_pooling_op, + void* workspace, + const void* input, + void* output); + +enum 
xnn_status xnn_create_global_average_pooling_nwc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_f32( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_f32( + xnn_operator_t global_average_pooling_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_global_average_pooling_nwc_qs8( + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_qs8( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_qs8( + xnn_operator_t global_average_pooling_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_global_average_pooling_nwc_qu8( + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_qu8( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_qu8( + xnn_operator_t global_average_pooling_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_global_sum_pooling_nwc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_sum_pooling_op_out); + +enum xnn_status xnn_reshape_global_sum_pooling_nwc_f16( + xnn_operator_t global_sum_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_sum_pooling_nwc_f16( + xnn_operator_t global_sum_pooling_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_global_sum_pooling_nwc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_sum_pooling_op_out); + +enum xnn_status xnn_reshape_global_sum_pooling_nwc_f32( + xnn_operator_t global_sum_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_sum_pooling_nwc_f32( + xnn_operator_t global_sum_pooling_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_hardswish_nc_f16( + uint32_t flags, + xnn_operator_t* hardswish_op_out); + +enum xnn_status xnn_reshape_hardswish_nc_f16( + xnn_operator_t hardswish_op, + size_t batch_size, + size_t channels, + size_t input_stride, + 
size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_hardswish_nc_f16( + xnn_operator_t hardswish_op, + const void* input, + void* output); + +enum xnn_status xnn_create_hardswish_nc_f32( + uint32_t flags, + xnn_operator_t* hardswish_op_out); + +enum xnn_status xnn_reshape_hardswish_nc_f32( + xnn_operator_t hardswish_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_hardswish_nc_f32( + xnn_operator_t hardswish_op, + const float* input, + float* output); + +enum xnn_status xnn_run_hardswish_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_f16( + float negative_slope, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_f16( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_f16( + xnn_operator_t leaky_relu_op, + const void* input, + void* output); + +enum xnn_status xnn_create_leaky_relu_nc_f32( + float negative_slope, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_f32( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_f32( + xnn_operator_t leaky_relu_op, + const float* input, + float* output); + +enum xnn_status xnn_run_leaky_relu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float negative_slope, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_qs8( + float negative_slope, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_qs8( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_qs8( + xnn_operator_t leaky_relu_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_leaky_relu_nc_qu8( + float negative_slope, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_qu8( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_qu8( + xnn_operator_t leaky_relu_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_f16( + xnn_operator_t 
max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_f16( + xnn_operator_t max_pooling_op, + const void* input, + void* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_f32( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_f32( + xnn_operator_t max_pooling_op, + const float* input, + float* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_s8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_s8( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_s8( + xnn_operator_t max_pooling_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_u8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_u8( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_u8( + xnn_operator_t max_pooling_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_maximum_nd_f16( + uint32_t flags, + xnn_operator_t* maximum_op_out); + +enum xnn_status xnn_reshape_maximum_nd_f16( + xnn_operator_t maximum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_maximum_nd_f16( + xnn_operator_t maximum_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_maximum_nd_f32( + uint32_t flags, + xnn_operator_t* 
maximum_op_out); + +enum xnn_status xnn_reshape_maximum_nd_f32( + xnn_operator_t maximum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_maximum_nd_f32( + xnn_operator_t maximum_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_maximum_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_mean_nd_f16( + uint32_t flags, + xnn_operator_t* mean_op_out); + +enum xnn_status xnn_reshape_mean_nd_f16( + xnn_operator_t mean_op, + size_t num_reduction_axes, + const size_t* reduction_axes, + size_t num_input_dims, + const size_t* input_shape, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_mean_nd_f16( + xnn_operator_t mean_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_mean_nd_f32( + uint32_t flags, + xnn_operator_t* mean_op_out); + +enum xnn_status xnn_reshape_mean_nd_f32( + xnn_operator_t mean_op, + size_t num_reduction_axes, + const size_t* reduction_axes, + size_t num_input_dims, + const size_t* input_shape, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_mean_nd_f32( + xnn_operator_t mean_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_minimum_nd_f16( + uint32_t flags, + xnn_operator_t* minimum_op_out); + +enum xnn_status xnn_reshape_minimum_nd_f16( + xnn_operator_t minimum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_minimum_nd_f16( + xnn_operator_t minimum_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_minimum_nd_f32( + uint32_t flags, + xnn_operator_t* minimum_op_out); + +enum xnn_status xnn_reshape_minimum_nd_f32( + xnn_operator_t minimum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_minimum_nd_f32( + xnn_operator_t minimum_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_minimum_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_f16( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_f16( + xnn_operator_t multiply_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_multiply_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_f32( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* 
input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_f32( + xnn_operator_t multiply_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_multiply_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_qs8( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_qs8( + xnn_operator_t multiply_op, + const int8_t* input1, + const int8_t* input2, + int8_t* output); + +enum xnn_status xnn_run_multiply_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_qu8( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_qu8( + xnn_operator_t multiply_op, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output); + +enum xnn_status xnn_run_multiply_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_negate_nc_f16( + uint32_t flags, + xnn_operator_t* negate_op_out); + +enum xnn_status xnn_reshape_negate_nc_f16( + xnn_operator_t negate_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_negate_nc_f16( + xnn_operator_t negate_op, + const void* input, + void* output); + +enum xnn_status xnn_create_negate_nc_f32( + uint32_t flags, + xnn_operator_t* negate_op_out); + +enum xnn_status xnn_reshape_negate_nc_f32( + xnn_operator_t negate_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_negate_nc_f32( + xnn_operator_t negate_op, + 
const float* input, + float* output); + +enum xnn_status xnn_run_negate_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_prelu_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + const void* negative_slope, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* prelu_op_out); + +enum xnn_status xnn_reshape_prelu_nc_f16( + xnn_operator_t prelu_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_prelu_nc_f16( + xnn_operator_t prelu_op, + const void* input, + void* output); + +enum xnn_status xnn_create_prelu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + const float* negative_slope, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* prelu_op_out); + +enum xnn_status xnn_reshape_prelu_nc_f32( + xnn_operator_t prelu_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_prelu_nc_f32( + xnn_operator_t prelu_op, + const float* input, + float* output); + +enum xnn_status xnn_create_resize_bilinear2d_nchw_f32( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f32( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nchw_f32( + xnn_operator_t resize_op, + const float* input, + float* output); + +enum xnn_status xnn_create_resize_bilinear2d_nchw_f16( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f16( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nchw_f16( + xnn_operator_t resize_op, + const void* input, + void* output); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_f16( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f16( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f16( + xnn_operator_t resize_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_f32( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f32( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f32( + xnn_operator_t resize_op, + void* workspace, + const float* input, + float* output); + 
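As an editorial aside (not part of the header in this diff), the declarations above follow XNNPACK's create/reshape/setup/run lifecycle, and the NHWC resize operators additionally require a caller-provided scratch workspace whose size and alignment are reported by the reshape call. The sketch below illustrates that pattern for xnn_create_resize_bilinear2d_nhwc_f32 under stated assumptions: the 1x112x112x3 input geometry, the 224x224 output size, the include path, and the NULL (single-threaded) threadpool are all illustrative; xnn_initialize, xnn_run_operator, and xnn_delete_operator are the standard XNNPACK entry points for initialization, execution, and teardown.

  #include <stdlib.h>   // aligned_alloc, free
  #include <xnnpack.h>  // the header shown in this diff; include path is an assumption

  // Hypothetical helper: resize a 1x112x112x3 NHWC f32 tensor to 224x224.
  int resize_example(const float* input, float* output) {
    if (xnn_initialize(/*allocator=*/NULL) != xnn_status_success) return 1;

    // Create: the operator is bound to the 224x224 output size, no flags.
    xnn_operator_t resize_op = NULL;
    if (xnn_create_resize_bilinear2d_nhwc_f32(224, 224, /*flags=*/0, &resize_op) !=
        xnn_status_success) return 1;

    // Reshape: bind the input geometry and query the scratch-workspace requirements.
    size_t workspace_size = 0, workspace_alignment = 0;
    if (xnn_reshape_resize_bilinear2d_nhwc_f32(
            resize_op, /*batch_size=*/1, /*input_height=*/112, /*input_width=*/112,
            /*channels=*/3, /*input_pixel_stride=*/3, /*output_pixel_stride=*/3,
            &workspace_size, &workspace_alignment, /*threadpool=*/NULL) !=
        xnn_status_success) return 1;

    // Allocate the workspace with the reported alignment (size rounded up so that
    // aligned_alloc accepts it).
    void* workspace = NULL;
    if (workspace_size != 0) {
      const size_t rounded = (workspace_size + workspace_alignment - 1) /
                             workspace_alignment * workspace_alignment;
      workspace = aligned_alloc(workspace_alignment, rounded);
      if (workspace == NULL) return 1;
    }

    // Setup: attach the workspace and the input/output pointers, then run.
    if (xnn_setup_resize_bilinear2d_nhwc_f32(resize_op, workspace, input, output) !=
        xnn_status_success) return 1;
    const enum xnn_status status = xnn_run_operator(resize_op, /*threadpool=*/NULL);

    free(workspace);
    xnn_delete_operator(resize_op);
    return status == xnn_status_success ? 0 : 1;
  }

Because reshape both fixes the shapes and reports the scratch requirements, the same operator and workspace can typically be reused across repeated setup/run calls until the input geometry changes.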
+enum xnn_status xnn_create_resize_bilinear2d_nhwc_s8( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_s8( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_s8( + xnn_operator_t resize_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_u8( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_u8( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_u8( + xnn_operator_t resize_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_rope_nthc_f16( + size_t max_tokens, + uint32_t flags, + xnn_operator_t* rope_op_out); + +enum xnn_status xnn_reshape_rope_nthc_f16( + xnn_operator_t rope_op, + size_t batch_size, + size_t tokens, + size_t heads, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_rope_nthc_f16( + xnn_operator_t rope_op, + const void* input, + const void* weights, + void* output); + +enum xnn_status xnn_create_rope_nthc_f32( + size_t max_tokens, + uint32_t flags, + xnn_operator_t* rope_op_out); + +enum xnn_status xnn_reshape_rope_nthc_f32( + xnn_operator_t rope_op, + size_t batch_size, + size_t tokens, + size_t heads, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_rope_nthc_f32( + xnn_operator_t rope_op, + const float* input, + const float* weights, + float* output); + +// N: batch size +// H: number of heads +// T: tokens (sequence length) +// C: channels (head dimension) +enum xnn_status xnn_create_scaled_dot_product_attention_nhtc_f16( + enum xnn_attention_logits_cap_type cap_type, + const void* cap_params, + uint32_t flags, + xnn_operator_t* attention_op_out); + +enum xnn_status xnn_reshape_scaled_dot_product_attention_nhtc_f16( + xnn_operator_t attention_op, + size_t batch_size, + size_t query_heads, + // Number of tokens in query. + size_t query_tokens, + size_t key_value_heads, + // Number of tokens in key/value. For self-attention, this is same as tokens. + size_t key_value_tokens, + size_t query_key_channels, + size_t value_channels, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +// Query is of dimension [batch_size, query_heads, query_tokens, channels]. +// Key and value are of dimension [batch_size, key_value_heads, key_value_tokens, channels]. +// Scale is of dimension [channels]. +// Mask is of dimension [query_tokens, key_value_tokens].
+enum xnn_status xnn_setup_scaled_dot_product_attention_nhtc_f16( + xnn_operator_t attention_op, + void* workspace, + const void* query, + const void* key, + const void* value, + const void* scale, + const void* mask, + void* output); + +// N: batch size +// H: number of heads +// T: tokens (sequence length) +// C: channels (head dimension) +enum xnn_status xnn_create_scaled_dot_product_attention_nhtc_f32( + enum xnn_attention_logits_cap_type cap_type, + const void* cap_params, + uint32_t flags, + xnn_operator_t* attention_op_out); + +enum xnn_status xnn_reshape_scaled_dot_product_attention_nhtc_f32( + xnn_operator_t attention_op, + size_t batch_size, + size_t query_heads, + // Number of tokens in query. + size_t query_tokens, + size_t key_value_heads, + // Number of tokens in key/value. For self-attention, this is same as tokens. + size_t key_value_tokens, + size_t query_key_channels, + size_t value_channels, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +// Query is of dimension [batch_size, query_heads, query_tokens, query_key_channels]. +// Key and value are of dimension [batch_size, key_value_heads, key_value_tokens, query_key_channels]. +// Scale is of dimension [query_key_channels]. +// Mask is of dimension [query_tokens, key_value_tokens]. +// Output is of dimension [batch_size, query_heads, query_tokens, value_channels]. +enum xnn_status xnn_setup_scaled_dot_product_attention_nhtc_f32( + xnn_operator_t attention_op, + void* workspace, + const float* query, + const float* key, + const float* value, + const float* scale, + const float* mask, + float* output); + +enum xnn_status xnn_create_sigmoid_nc_f16( + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_f16( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_f16( + xnn_operator_t sigmoid_op, + const void* input, + void* output); + +enum xnn_status xnn_create_sigmoid_nc_f32( + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_f32( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_f32( + xnn_operator_t sigmoid_op, + const float* input, + float* output); + +enum xnn_status xnn_run_sigmoid_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_sigmoid_nc_qs8( + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_qs8( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_qs8( + xnn_operator_t sigmoid_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_sigmoid_nc_qu8( + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_qu8( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t 
channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_qu8( + xnn_operator_t sigmoid_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_slice_nd_x16( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_reshape_slice_nd_x16( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_slice_nd_x16( + xnn_operator_t slice_op, + const void* input, + void* output); + +enum xnn_status xnn_create_slice_nd_x32( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_reshape_slice_nd_x32( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_slice_nd_x32( + xnn_operator_t slice_op, + const void* input, + void* output); + +enum xnn_status xnn_run_slice_nd_x32( + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + const void* input, + void* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_softmax_nc_f16( + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_reshape_softmax_nc_f16( + xnn_operator_t softmax_op, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_softmax_nc_f16( + xnn_operator_t softmax_op, + const void* input, + void* output); + +enum xnn_status xnn_create_softmax_nc_f32( + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_reshape_softmax_nc_f32( + xnn_operator_t softmax_op, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_softmax_nc_f32( + xnn_operator_t softmax_op, + const float* input, + float* output); + +enum xnn_status xnn_create_softmax_nc_qu8( + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_reshape_softmax_nc_qu8( + xnn_operator_t softmax_op, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_softmax_nc_qu8( + xnn_operator_t softmax_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_space_to_depth_nhwc_x16( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_reshape_space_to_depth_nhwc_x16( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x16( + xnn_operator_t space_to_depth_op, + const void* input, + void* output); + +enum xnn_status xnn_create_space_to_depth_nhwc_x32( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_reshape_space_to_depth_nhwc_x32( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_space_to_depth_nhwc_x32( + xnn_operator_t space_to_depth_op, + const void* input, + void* output); + +enum xnn_status xnn_create_square_nc_f16( + uint32_t flags, + xnn_operator_t* square_op_out); + +enum xnn_status xnn_reshape_square_nc_f16( + xnn_operator_t square_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_nc_f16( + xnn_operator_t square_op, + const void* input, + void* output); + +enum xnn_status xnn_create_square_nc_f32( + uint32_t flags, + xnn_operator_t* square_op_out); + +enum xnn_status xnn_reshape_square_nc_f32( + xnn_operator_t square_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_nc_f32( + xnn_operator_t square_op, + const float* input, + float* output); + +enum xnn_status xnn_run_square_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_square_root_nc_f16( + uint32_t flags, + xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_reshape_square_root_nc_f16( + xnn_operator_t sqrt_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_root_nc_f16( + xnn_operator_t sqrt_op, + const void* input, + void* output); + +enum xnn_status xnn_create_square_root_nc_f32( + uint32_t flags, + xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_reshape_square_root_nc_f32( + xnn_operator_t sqrt_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_root_nc_f32( + xnn_operator_t sqrt_op, + const float* input, + float* output); + +enum xnn_status xnn_run_square_root_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_reciprocal_square_root_nc_f32( + uint32_t flags, xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_reshape_reciprocal_square_root_nc_f32( + xnn_operator_t sqrt_op, size_t batch_size, size_t channels, + size_t input_stride, size_t output_stride, pthreadpool_t threadpool); + +enum xnn_status xnn_setup_reciprocal_square_root_nc_f32(xnn_operator_t sqrt_op, + const float* input, + float* output); + +enum xnn_status xnn_run_reciprocal_square_root_nc_f32( + size_t channels, size_t input_stride, size_t output_stride, + size_t batch_size, const float* input, float* output, uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_squared_difference_nd_f16( + uint32_t flags, + xnn_operator_t* squared_difference_op_out); + +enum xnn_status xnn_reshape_squared_difference_nd_f16( + xnn_operator_t squared_difference_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_squared_difference_nd_f16( + xnn_operator_t squared_difference_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_squared_difference_nd_f32( + uint32_t flags, + xnn_operator_t* squared_difference_op_out); + +enum xnn_status xnn_reshape_squared_difference_nd_f32( + xnn_operator_t squared_difference_op, + size_t 
num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_squared_difference_nd_f32( + xnn_operator_t squared_difference_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_squared_difference_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_f16( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_f16( + xnn_operator_t subtract_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_subtract_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_f32( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_f32( + xnn_operator_t subtract_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_subtract_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_qs8( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_qs8( + xnn_operator_t subtract_op, + const int8_t* input1, + const int8_t* input2, + int8_t* output); + +enum xnn_status xnn_run_subtract_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_qu8( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_qu8( + 
xnn_operator_t subtract_op, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output); + +enum xnn_status xnn_run_subtract_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_tanh_nc_f16( + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_f16( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_f16( + xnn_operator_t tanh_op, + const void* input, + void* output); + +enum xnn_status xnn_create_tanh_nc_f32( + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_f32( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_f32( + xnn_operator_t tanh_op, + const float* input, + float* output); + +enum xnn_status xnn_run_tanh_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_tanh_nc_qs8( + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_qs8( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_qs8( + xnn_operator_t tanh_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_tanh_nc_qu8( + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_qu8( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_qu8( + xnn_operator_t tanh_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_transpose_nd_x8( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x8( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_transpose_nd_x8( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x8( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x16( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x16( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_transpose_nd_x16( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x16( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x32( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x32( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_transpose_nd_x32( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x32( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x64( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x64( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_transpose_nd_x64( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x64( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_truncation_nc_f16( + uint32_t flags, + xnn_operator_t* truncation_op_out); + +enum xnn_status xnn_reshape_truncation_nc_f16( + xnn_operator_t truncation_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_truncation_nc_f16( + xnn_operator_t truncation_op, + const void* input, + void* output); + +enum xnn_status xnn_create_truncation_nc_f32( + uint32_t flags, + xnn_operator_t* truncation_op_out); + +enum xnn_status xnn_reshape_truncation_nc_f32( + xnn_operator_t truncation_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_truncation_nc_f32( + xnn_operator_t truncation_op, + const float* input, + float* output); + +enum xnn_status xnn_run_truncation_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_unpooling2d_nhwc_x32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* unpooling_op_out); + +enum xnn_status xnn_reshape_unpooling2d_nhwc_x32( + xnn_operator_t unpooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_unpooling2d_nhwc_x32( + xnn_operator_t unpooling_op, + const void* input, + const uint32_t* index, + void* output); + +enum xnn_status xnn_create_slice_nd_x8( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_reshape_slice_nd_x8( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const 
size_t* offsets, + const size_t* sizes, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_slice_nd_x8( + xnn_operator_t slice_op, + const void* input, + void* output); + +enum xnn_status xnn_create_space_to_depth_nhwc_x8( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_reshape_space_to_depth_nhwc_x8( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x8( + xnn_operator_t space_to_depth_op, + const void* input, + void* output); + +#ifdef __cplusplus +} // extern "C" +#endif
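As a closing editorial note (not part of the header), many of the elementwise *_nd operators declared above also expose one-shot xnn_run_* entry points that skip the create/reshape/setup lifecycle entirely. The sketch below shows this for xnn_run_multiply_nd_f32 under stated assumptions: the shapes (a per-column scale broadcast over a 2x3 row-major matrix) and the NULL, single-threaded threadpool are illustrative, and the infinite clamping bounds are used here to leave the output unclamped.

  #include <math.h>     // INFINITY
  #include <xnnpack.h>  // the header shown in this diff; include path is an assumption

  // Hypothetical one-shot multiply with broadcasting: out[i][j] = a[i][j] * b[j].
  int multiply_rows_by_scale(void) {
    if (xnn_initialize(/*allocator=*/NULL) != xnn_status_success) return 1;

    const size_t a_shape[2] = {2, 3};  // input1: 2x3 matrix, row-major
    const size_t b_shape[1] = {3};     // input2: per-column scale, broadcast over rows
    const float a[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
    const float b[3] = {10.0f, 100.0f, 1000.0f};
    float out[6];

    const enum xnn_status status = xnn_run_multiply_nd_f32(
        /*num_input1_dims=*/2, a_shape,
        /*num_input2_dims=*/1, b_shape,
        a, b, out,
        /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
        /*flags=*/0, /*threadpool=*/NULL);
    // Expected: out == {10, 200, 3000, 40, 500, 6000}.
    return status == xnn_status_success ? 0 : 1;
  }

The one-shot form is convenient for a single invocation; the create/reshape/setup lifecycle shown earlier amortizes operator construction when the same shapes are executed repeatedly.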