diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ab501c853cb67a0410624707df564c6bf8af7f5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__init__.py @@ -0,0 +1,444 @@ +import inspect +from collections import defaultdict +from functools import wraps +from itertools import chain +from typing import Callable, Dict, List, Sequence, Union + +import torch +import torch.library +from torch._ops import HigherOrderOperator, OpOverload, OpOverloadPacket +from torch._prims_common import CustomOutParamAnnotation +from torch.utils import _pytree as pytree + +__all__ = [ + "decomposition_table", + "pre_autograd_decomposition_table", + "meta_table", + "register_decomposition", + "get_decompositions", + "core_aten_decompositions", +] + + +# TODO: relax key type here; torch registrations should be possible to; but +# right now this type is accurate +global_decomposition_table: Dict[ + str, Dict[torch._ops.OperatorBase, Callable] +] = defaultdict(dict) + +decomposition_table = global_decomposition_table["post_autograd"] +pre_autograd_decomposition_table = global_decomposition_table["pre_autograd"] +meta_table = global_decomposition_table["meta"] + + +def _add_op_to_registry(registry, op, fn): + """ + This is an internal API for adding an op to the decomposition table. + + If op is OpOverload, it will be added to the registry directly. + If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry. + """ + overloads: List[Union[torch._ops.OperatorBase]] = [] + if isinstance(op, HigherOrderOperator): + # There's no concept of overloads for HigherOrderOperator + registry[op] = fn + return + elif isinstance(op, OpOverload): + overloads.append(op) + else: + assert isinstance(op, OpOverloadPacket) + for ol in op.overloads(): + overloads.append(getattr(op, ol)) + + for op_overload in overloads: + if op_overload in registry: + raise RuntimeError(f"duplicate registrations for {op_overload}") + # TorchScript dumps a bunch of extra nonsense overloads + # which don't have corresponding dispatcher entries, we need + # to filter those out, e.g aten.add.float_int + if torch._C._dispatch_has_kernel(op_overload.name()): + registry[op_overload] = fn + + +def _convert_out_params(f): + out_annotation = f.__annotations__.get("out") + + # If there are no out params, do not wrap the function. + if not out_annotation: + return f + + # Hack to detect when out is a Tuple. 
There seems to be no pretty way of doing this + if getattr(out_annotation, "__origin__", None) is tuple: + sig = inspect.signature(f) + out_names = sig.return_annotation._fields + # If out is a tuple, we need to register a function that unpacks all the out + # elements as this is what native_functions.yaml expects + + @wraps(f) + def _fn(*args, **kwargs): + out_kwargs = tuple(kwargs.pop(o, None) for o in out_names) + # Either all of the out kwargs are set or none of them + is_none = out_kwargs[0] is None + assert all((o is None) == is_none for o in out_kwargs) + return f(*args, **kwargs, out=None if is_none else out_kwargs) + + out_params = [ + inspect.Parameter( + o, + kind=inspect.Parameter.KEYWORD_ONLY, + default=None, + annotation=t, + ) + for o, t in zip(out_names, out_annotation.__args__) + ] + # Drop the out parameter and concatenate the new kwargs in the signature + params = chain((v for k, v in sig.parameters.items() if k != "out"), out_params) + _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined] + parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type] + ) + # Drop the out parameter and concatenate the new kwargs in the annotations + _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"} + for o in out_params: + _fn.__annotations__[o.name] = o.annotation + + # Propagate that this function is wrapped by `out_wrapper` + _fn._torch_decompositions_out_wrapper = f._torch_decompositions_out_wrapper # type: ignore[attr-defined] + + return _fn + + # Alternatively, there may be a single tensor out parameter with a name + # other than "out". This will need special treatment and is indicated by an + # annotation, which we will remove here so it is not exposed after wrapping. + custom_out_param_name = f.__annotations__.pop(CustomOutParamAnnotation, None) + if custom_out_param_name: + + @wraps(f) + def _fn(*args, **kwargs): + out_kwarg = kwargs.pop(custom_out_param_name, None) + return f(*args, **kwargs, out=out_kwarg) + + out_param = inspect.Parameter( + custom_out_param_name, + kind=inspect.Parameter.KEYWORD_ONLY, + default=None, + annotation=out_annotation, + ) + + # Drop the out parameter and concatenate the new kwarg in the signature + sig = inspect.signature(f) + params = chain( + (v for k, v in sig.parameters.items() if k != "out"), (out_param,) + ) + _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined] + parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type] + ) + + # Drop the out parameter and concatenate the new kwargs in the annotations + _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"} + _fn.__annotations__[out_param.name] = out_param.annotation + + return _fn + + return f + + +def register_decomposition( + aten_op, registry=None, *, type="post_autograd", unsafe=False +): + """ + A decorator to register a function as a decomposition to the Python + decomposition table. Use it like this:: + + @register_decomposition(torch.ops.aten.clamp_min) + def clamp_min(x): + return torch.clamp(self, min=min) + + If you are writing a new decomposition, consider contributing it + directly to PyTorch in torch._decomp.decompositions. + + This API is experimental; we are almost certainly going to extend + the API when we make decompositions eligible for use in transforms (e.g., + autograd) and not just backend tracing, where we then need to know if a + decomposition can be used to simulate a transform. 
+ + By default, we also will register it to the Meta key of dispatcher, + and replace the c++ Meta implementation if there is already one. + + unsafe kwarg is for reuse of this function for registering non-function + things + """ + + assert type in {"post_autograd", "pre_autograd", "meta"} + + def decomposition_decorator(fn: Callable) -> Callable: + if not unsafe: + fn = _convert_out_params(fn) + + nonlocal registry + if registry is None: + registry = global_decomposition_table[type] + + def register(op): + _add_op_to_registry(registry, op, fn) + + # To handle allowing multiple aten_ops at once + pytree.tree_map_(register, aten_op) + return fn + + return decomposition_decorator + + +def get_decompositions( + aten_ops: Sequence[Union[torch._ops.OperatorBase, OpOverloadPacket]], + type: str = "post_autograd", +) -> Dict[torch._ops.OperatorBase, Callable]: + """ + Retrieve a dictionary of decompositions corresponding to the list of + operator overloads and overload packets passed as input. Overload + packets will include all decomposed overloads in the packet. If there is + no decomposition for a requested operator, it is silently ignored. + + This API is experimental; we are almost certainly going to give an alternate, + more recommended formulation, where a user provides the set of operators + they know how to implement, and we provide decompositions for everything + not in this set. + """ + assert type in {"post_autograd", "pre_autograd", "meta"} + + registry = global_decomposition_table[type] + packets_to_overloads = defaultdict(list) + for opo in registry: + if isinstance(opo, (OpOverload, OpOverloadPacket)): + packets_to_overloads[opo.overloadpacket].append(opo) + decompositions: Dict[torch._ops.OperatorBase, Callable] = {} + for op in aten_ops: + if isinstance(op, OpOverloadPacket) and op in packets_to_overloads: + for op_overload in packets_to_overloads[op]: + decompositions[op_overload] = registry[op_overload] + elif isinstance(op, (torch._ops.OperatorBase)) and op in registry: + decompositions[op] = registry[op] + return decompositions + + +def remove_decompositions( + decompositions: Dict[torch._ops.OperatorBase, Callable], + aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]], +) -> None: + """ + Given a dictionary of decompositions obtained from get_decompositions(), removes + operators associated with a list of operator overloads and overload packets passed + as input. If the decomposition dictionary does not contain a decomposition that is + specified to be removed, it is silently ignored. 
+ """ + for op in aten_ops: + if isinstance(op, OpOverloadPacket): + for overload_name in op.overloads(): + opo = getattr(op, overload_name) + decompositions.pop(opo, None) + elif isinstance(op, OpOverload): + decompositions.pop(op, None) + + +# populate the table +import torch._decomp.decompositions +import torch._refs + + +# See NOTE [Core ATen Ops] +# +# list was copied from torch/_inductor/decomposition.py +# excluding decompositions that results in prim ops +# Resulting opset of decomposition is core aten ops +def core_aten_decompositions() -> Dict[torch._ops.OperatorBase, Callable]: + aten = torch.ops.aten + return get_decompositions( + [ + aten.addcdiv, + aten.addcdiv_, + aten.addcmul, + aten.addcmul_, + aten.addr, + aten.affine_grid_generator, + aten.all, + aten.aminmax, + aten.arange.default, + aten.arange.start, + aten.avg_pool2d_backward, + aten.baddbmm, + aten.binary_cross_entropy, + aten.binary_cross_entropy_backward, + aten.binary_cross_entropy_with_logits, + aten.celu, + aten.celu_, + aten.clamp_max, + aten.clamp_min, + aten.col2im, + aten.count_nonzero, + aten.cudnn_batch_norm, + aten.cudnn_batch_norm_backward, + aten.deg2rad, + aten.deg2rad_, + aten.detach, + aten.diag_embed, + aten.diagonal_backward, + aten.dot, + aten.vdot, + aten.elu, + aten.elu_, + aten.elu_backward, + aten._embedding_bag, + aten.embedding_dense_backward, + aten.empty_like, + aten._euclidean_dist.default, + aten.expand_as, + aten.eye, + aten.fill, + aten.fill_, + aten.floor_divide, + aten.frac, + aten.frac_, + aten._fused_moving_avg_obs_fq_helper, + aten.gelu_, + aten.gelu_backward, + aten.glu, + aten.glu_backward, + aten.hardshrink, + aten.hardsigmoid, + aten.hardsigmoid_, + aten.hardsigmoid_backward, + aten.hardswish, + aten.hardswish_, + aten.hardswish_backward, + aten.hardtanh_, + aten.hardtanh_backward, + aten.heaviside, + aten.heaviside_, + aten.huber_loss, + aten.huber_loss_backward, + aten.im2col, + aten.index_add, + aten.index_add_, + aten.index_copy, + aten.index_copy_, + aten.index_fill, + aten.index_fill_, + aten.isneginf, + aten.isposinf, + aten.l1_loss, + aten.leaky_relu_, + aten.leaky_relu_backward, + aten.lerp, + aten.lerp_, + aten.linspace, + aten.logaddexp, + aten.logaddexp2, + aten.logit, + aten.logit_, + aten.logit_backward, + aten.log_sigmoid_backward, + aten.log_sigmoid_forward, + aten._log_softmax_backward_data, + aten.logspace, + aten.logsumexp.default, + aten.masked_fill, + aten.masked_fill_, + aten.mish, + aten.mish_, + aten.mse_loss, + aten.mse_loss_backward, + aten.multi_margin_loss, + aten.multilabel_margin_loss_forward, + aten.mv, + aten.mvlgamma, + aten.mvlgamma_, + aten.nansum, + aten.nan_to_num, + aten.nan_to_num_, + aten.narrow, + aten.native_batch_norm_backward, + aten.native_dropout_backward, + aten.native_group_norm_backward, + aten.native_layer_norm_backward, + aten.new_empty, + aten.new_full, + aten.new_ones, + aten.new_zeros, + aten.nll_loss_backward, + aten.nll_loss_forward, + aten.norm, + aten.ones, + aten.ones_like, + aten._prelu_kernel, + aten._prelu_kernel_backward, + aten._reshape_alias, + aten.rad2deg, + aten.rad2deg_, + aten.renorm, + aten.renorm_, + aten.replication_pad2d, + aten.rot90, + aten.rrelu_with_noise, + aten.rrelu_with_noise_, + aten.rsub.Scalar, + aten.rsub.Tensor, + aten._scaled_dot_product_flash_attention.default, + aten.select_backward, + aten.select_scatter, + aten.sgn, + aten.sgn_, + aten.sigmoid_backward, + aten.silu, + aten.silu_, + aten.silu_backward, + aten.sinc, + aten.sinc_, + aten.slice_backward, + aten.smooth_l1_loss, + 
aten.smooth_l1_loss_backward, + aten.soft_margin_loss, + aten.soft_margin_loss_backward, + aten._softmax_backward_data, + aten.softplus, + aten.softplus_backward, + aten.softshrink, + aten.special_entr, + aten.special_log_ndtr, + aten.special_xlog1py, + aten.split.Tensor, + aten.squeeze.default, + aten.squeeze.dim, + aten.std, + aten.std_mean, + aten.stack, + aten.sum.default, + aten.sum.out, + aten.t, + aten.tanh_backward, + aten.threshold, + aten.threshold_, + aten.threshold_backward, + aten.trace, + aten.transpose.int, + aten.tril, + aten.tril_, + aten.triu, + aten.triu_, + aten.unbind, + aten.unfold_backward, + aten.unfold_copy, + aten._unsafe_index, + aten.unsafe_split.Tensor, + aten.unsafe_split_with_sizes, + aten._unsafe_view, + aten.upsample_bilinear2d, + aten.upsample_nearest2d_backward, + aten.view_as_complex, + aten.xlogy, + aten.xlogy_, + aten.zero, + aten.zero_, + aten.zeros, + aten.zeros_like, + aten._weight_norm_interface, + ] + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ac11f88ebc6cc1d701bf545bf8855a0327125e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f58689ab90804fa94a352d464dca322131f5013 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a24367d4ce48e8a9d7cb3317f7dd51208fd6cb1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb3e561261e0836e69300879f6cd1281fcb73956 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions.py b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions.py new file mode 100644 index 0000000000000000000000000000000000000000..acb9125f27b47de0590152927e2a05c962060453 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions.py @@ -0,0 +1,4465 @@ +import functools +import numbers +import operator +import sys +from enum import Enum +from functools import partial, reduce +from itertools import chain, product +from typing import Callable, cast, Iterable, List, Optional, Tuple, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch.nn.functional as F +from 
torch import sym_float, sym_int, Tensor +from torch._decomp import register_decomposition +from torch._higher_order_ops.out_dtype import out_dtype +from torch._prims_common import IntLike, NumberType, TensorLike, TensorSequenceType +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + _maybe_resize_out, + _safe_copy_out, + out_wrapper, +) +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_map + +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] + +# None of these functions are publicly accessible; get at them +# from torch._decomps +__all__: List[str] = [] + +aten = torch._ops.ops.aten + + +class Reduction(Enum): + NONE = 0 + MEAN = 1 + SUM = 2 + + +# This wraps a decomposition and performs various type promotion logic within it, depending on the strategy provided +# We're currently re-using ELEMENTWISE_TYPE_PROMOTION_KIND, although some of the usages are on non-elementwise ops +# Will need to validate the non-elementwise uses +def type_casts( + f: Callable, + type_promotion: utils.ELEMENTWISE_TYPE_PROMOTION_KIND, + compute_dtype_only: bool = False, +): + @functools.wraps(f) + def inner(*args, **kwargs): + flat_args = [ + x for x in pytree.arg_tree_leaves(*args, **kwargs) if isinstance(x, Tensor) + ] + computation_dtype, result_dtype = utils.elementwise_dtypes( + *flat_args, type_promotion_kind=type_promotion + ) + + # TODO: pretty sure this is not quite right + def increase_prec(x): + if isinstance(x, Tensor): + return x.to(computation_dtype) + else: + return x + + def decrease_prec(x): + if isinstance(x, Tensor): + return x.to(result_dtype) + else: + return x + + r = f(*tree_map(increase_prec, args), **tree_map(increase_prec, kwargs)) + if compute_dtype_only: + return r + else: + return tree_map(decrease_prec, r) + + return inner + + +compute_only_pw_cast_for_opmath = partial( + type_casts, + type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + compute_dtype_only=True, +) +pw_cast_for_opmath = partial( + type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT +) +pw_cast_for_int_to_real = partial( + type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT +) + + +# This expands x until x.dim() == dim. 
Might be useful as an operator +def _unsqueeze_to_dim(x: Tensor, dim: int) -> Tensor: + for _ in range(dim - x.dim()): + x = x.unsqueeze(-1) + return x + + +@register_decomposition(aten.tanh_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def tanh_backward(out_grad: Tensor, y: Tensor): + return out_grad * (1 - y * y).conj_physical() + + +@register_decomposition(aten.sigmoid_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def sigmoid_backward(out_grad: Tensor, y: Tensor): + return out_grad * (y * (1 - y)).conj_physical() + + +@register_decomposition(aten.softplus_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def softplus_backward(out_grad: Tensor, x: Tensor, beta: float, threshold: float): + z = (x * beta).exp() + return torch.where((x * beta) > threshold, out_grad, out_grad * z / (z + 1.0)) + + +@register_decomposition(aten.elu_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def elu_backward( + grad_output: Tensor, + alpha: float, + scale: float, + input_scale: float, + is_result: bool, + self_or_result: Tensor, +): + negcoef = alpha * scale + poscoef = scale + negiptcoef = input_scale + if is_result: + return torch.where( + self_or_result <= 0, + grad_output * negiptcoef * (self_or_result + negcoef), + grad_output * poscoef, + ) + else: + return torch.where( + self_or_result <= 0, + grad_output * negiptcoef * negcoef * torch.exp(self_or_result * negiptcoef), + grad_output * poscoef, + ) + + +@register_decomposition([aten.fill.Scalar]) +def fill_scalar(self, value): + return torch.full_like(self, value) + + +@register_decomposition([aten.fill.Tensor]) +def fill_tensor(self, value: Tensor): + torch._check( + value.dim() == 0, + lambda: f"fill only supports 0-dimension value tensor but got tensor with {value.dim()} dimensions", + ) + return aten.copy(self, value) + + +@register_decomposition(aten.hardsigmoid) +@out_wrapper() +@pw_cast_for_opmath +def hardsigmoid(self: Tensor) -> Tensor: + return torch.clamp(torch.clamp(self + 3, min=0), max=6) / 6 + + +@register_decomposition(aten.hardsigmoid_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def hardsigmoid_backward(grad_output: Tensor, self: Tensor): + return torch.where( + (self > -3.0) & (self < 3.0), + grad_output * (1.0 / 6.0), + 0.0, + ) + + +@register_decomposition(aten.hardtanh_backward) +@out_wrapper("grad_input") +def hardtanh_backward( + grad_output: Tensor, self: Tensor, min_val: float, max_val: float +): + return torch.where((self <= min_val) | (self >= max_val), 0.0, grad_output) + + +@register_decomposition(aten.hardswish) +@out_wrapper() +@pw_cast_for_opmath +def hardswish(self: Tensor) -> Tensor: + return self * torch.clamp(torch.clamp(self + 3, min=0), max=6) / 6 + + +@register_decomposition(aten.hardswish_backward) +@out_wrapper() +@pw_cast_for_opmath +def hardswish_backward(grad_output: Tensor, self: Tensor) -> Tensor: + return torch.where( + self < -3, + 0.0, + torch.where(self <= 3, grad_output * ((self / 3) + 0.5), grad_output), + ) + + +@register_decomposition(aten.threshold_backward) +@out_wrapper("grad_input") +def threshold_backward(grad_output: Tensor, self: Tensor, threshold: float): + return torch.where(self <= threshold, 0, grad_output) + + +@register_decomposition(aten.leaky_relu_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def leaky_relu_backward( + grad_output: Tensor, self: Tensor, negative_slope: float, self_is_result: bool +): + return torch.where(self > 0, grad_output, grad_output * negative_slope) + + 
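The activation decompositions above are ordinary Python functions, keyed by OpOverload in the table built in torch/_decomp/__init__.py, so they can also be called directly on eager tensors. A minimal cross-check sketch, not part of the patched file and using only public PyTorch APIs (the choice of aten.hardsigmoid_backward and the shapes are arbitrary):

import torch
from torch._decomp import decomposition_table

op = torch.ops.aten.hardsigmoid_backward.default
decomp = decomposition_table[op]  # registered by the decorator above

grad_out, x = torch.randn(8), torch.randn(8)
# The Python decomposition should agree numerically with the eager ATen kernel.
torch.testing.assert_close(decomp(grad_out, x), op(grad_out, x))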
+@register_decomposition(aten.gelu_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def gelu_backward(grad: Tensor, self: Tensor, approximate: str = "none"): + M_SQRT2 = 1.41421356237309504880 + M_SQRT1_2 = 0.70710678118654752440 + M_2_SQRTPI = 1.12837916709551257390 + if approximate == "tanh": + kBeta = M_SQRT2 * M_2_SQRTPI * 0.5 + kKappa = 0.044715 + x_sq = self * self + x_cube = x_sq * self + inner = kBeta * (self + kKappa * x_cube) + tanh_inner = torch.tanh(inner) + + left = 0.5 * self + right = 1 + tanh_inner + + left_derivative = 0.5 * right + + tanh_derivative = 1 - tanh_inner * tanh_inner + inner_derivative = kBeta * (1 + 3 * kKappa * x_sq) + right_derivative = left * tanh_derivative * inner_derivative + + return grad * (left_derivative + right_derivative) + else: + kAlpha = M_SQRT1_2 + kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5 + cdf = 0.5 * (1 + torch.erf(self * kAlpha)) + pdf = kBeta * torch.exp(self * self * -0.5) + return grad * (cdf + self * pdf) + + +@register_decomposition(aten.mish_backward) +@pw_cast_for_opmath +def mish_backward(grad_output: Tensor, input: Tensor): + input_tanh_softplus = torch.tanh(F.softplus(input)) + input_sigmoid = torch.sigmoid(input) + out = input * input_sigmoid * (1 - input_tanh_softplus * input_tanh_softplus) + return grad_output * (input_tanh_softplus + out) + + +@register_decomposition(aten.silu) +@out_wrapper() +@pw_cast_for_opmath +def silu(self: Tensor) -> Tensor: + return self * torch.sigmoid(self) + + +@register_decomposition(aten.silu_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def silu_backward(grad_output: Tensor, self: Tensor) -> Tensor: + sigmoid = 1 / (1 + torch.exp(-self)) + return grad_output * sigmoid * (1 + self * (1 - sigmoid)) + + +@register_decomposition(aten._prelu_kernel) +def _prelu_kernel(self: Tensor, weight: Tensor) -> Tensor: + return torch.where(self > 0, self, weight * self) + + +@register_decomposition(aten._prelu_kernel_backward) +def _prelu_kernel_backward( + grad_output: Tensor, + self: Tensor, + weight: Tensor, +) -> Tuple[Tensor, Tensor]: + input_grad = torch.where(self > 0, grad_output, weight * grad_output) + weight_grad = torch.where(self > 0, 0.0, self * grad_output) + return (input_grad, weight_grad) + + +@register_decomposition(aten.rrelu_with_noise) +@aten.rrelu_with_noise.default.py_impl(DispatchKey.AutogradCUDA) +@out_wrapper() +@pw_cast_for_opmath +def rrelu_with_noise( + self: Tensor, + noise: Tensor, + lower: float, + upper: float, + training: bool = False, + generator: Optional[torch.Generator] = None, +) -> Tensor: + assert generator is None + if training: + not_positive = self <= 0 + r = aten.uniform(self, lower, upper) + output = torch.where(not_positive, self * r, self) + noise.copy_(torch.where(not_positive, r, 1)) + return output + else: + negative_slope = (lower + upper) / 2 + return aten.leaky_relu(self, negative_slope) + + +@register_decomposition(aten.rrelu_with_noise_) +@aten.rrelu_with_noise_.default.py_impl(DispatchKey.AutogradCUDA) +@pw_cast_for_opmath +def rrelu_with_noise_( + self: Tensor, + noise: Tensor, + lower: float, + upper: float, + training: bool = False, + generator: Optional[torch.Generator] = None, +) -> Tensor: + return self.copy_(rrelu_with_noise(self, noise, lower, upper, training, generator)) + + +@register_decomposition(aten.rrelu_with_noise_backward) +@out_wrapper() +@pw_cast_for_opmath +def rrelu_with_noise_backward( + grad_output: Tensor, + self: Tensor, + noise: Tensor, + lower: float, + upper: float, + training: bool, + self_is_result: bool, 
+) -> Tensor: + if training and upper - lower > 1e-6: + return grad_output.mul(noise) + else: + negative_slope = (lower + upper) / 2 + return aten.leaky_relu_backward( + grad_output, self, negative_slope, self_is_result + ) + + +@register_decomposition(aten.log_sigmoid_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def log_sigmoid_backward(grad_output: Tensor, self: Tensor, buffer: Tensor) -> Tensor: + in_negative = self < 0 + max_deriv = torch.where(in_negative, 1, 0) + sign = torch.where(in_negative, 1, -1) + z = torch.exp(-torch.abs(self)) + return grad_output * (max_deriv - sign * (z / (1 + z))) + # CPU has a special formula that uses buffer, but disabled for convenience sake + # return (max_deriv - sign * (buffer / (1 + buffer))) * grad_output + + +def apply_loss_reduction(loss: Tensor, reduction: int): + if reduction == Reduction.MEAN.value: + return torch.mean(loss) + elif reduction == Reduction.SUM.value: + return torch.sum(loss) + else: + return loss + + +def to_real_dtype(dtype: torch.dtype): + if dtype == torch.complex32: + return torch.float16 + elif dtype == torch.complex64: + return torch.float32 + elif dtype == torch.complex128: + return torch.float64 + + +# TODO: None of these loss castings are quite correct, see +# https://github.com/pytorch/pytorch/issues/76870. Also, the ATen kernels +# perform the pointwise portion in opmath, but don't maintain it between the +# pointwise portion and the reduction + + +@register_decomposition(aten.mse_loss) +@out_wrapper() +@pw_cast_for_opmath +def mse_loss( + self: Tensor, target: Tensor, reduction: int = Reduction.MEAN.value +) -> Tensor: + loss = (self - target) ** 2 + return apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.mse_loss_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def mse_loss_backward( + grad_output: Tensor, input: Tensor, target: Tensor, reduction: int +): + norm = 2.0 / input.numel() if reduction == Reduction.MEAN.value else 2.0 + return norm * (input - target) * grad_output + + +@register_decomposition(aten.smooth_l1_loss) +@out_wrapper() +@pw_cast_for_opmath +def smooth_l1_loss( + self: Tensor, + target: Tensor, + reduction: int = Reduction.MEAN.value, + beta: float = 1.0, +): + loss = (self - target).abs() + loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta) + return apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.smooth_l1_loss_backward.default) +@pw_cast_for_opmath +def smooth_l1_loss_backward( + grad_output: Tensor, self: Tensor, target: Tensor, reduction: int, beta: float +): + norm = 1.0 / self.numel() if reduction == Reduction.MEAN.value else 1.0 + x = self - target + abs_x = torch.abs(x) + norm_grad = norm * grad_output + return torch.where( + abs_x < beta, + norm_grad * x / beta, + norm_grad * torch.sign(x), + ) + + +@register_decomposition(aten.smooth_l1_loss_backward.grad_input) +@pw_cast_for_opmath +def smooth_l1_loss_backward_out( + grad_output: Tensor, + self: Tensor, + target: Tensor, + reduction: int, + beta: float, + grad_input: Tensor, +): + result = smooth_l1_loss_backward(grad_output, self, target, reduction, beta) + _maybe_resize_out(grad_input, result.shape) + return _safe_copy_out(copy_from=result, copy_to=grad_input, exact_dtype=True) + + +@register_decomposition(aten.huber_loss_backward.default) +@pw_cast_for_opmath +def huber_loss_backward( + grad_output: Tensor, self: Tensor, target: Tensor, reduction: int, delta: float +): + norm = 1.0 / self.numel() if reduction == Reduction.MEAN.value else 
1.0 + x = self - target + return torch.where( + x < -delta, + -norm * grad_output * delta, + torch.where(x > delta, norm * grad_output * delta, norm * x * grad_output), + ) + + +# We cannot use @out_wrapper() here, because the output tensor is not named 'out', it's 'grad_input' +@register_decomposition(aten.huber_loss_backward.out) +@pw_cast_for_opmath +def huber_loss_backward_out( + grad_output: Tensor, + self: Tensor, + target: Tensor, + reduction: int, + delta: float, + grad_input: Tensor, +): + result = huber_loss_backward(grad_output, self, target, reduction, delta) + _maybe_resize_out(grad_input, result.shape) + return _safe_copy_out(copy_from=result, copy_to=grad_input, exact_dtype=True) + + +def _nll_loss_backward( + grad_output: Tensor, + self: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, + total_weight: Tensor, +) -> Tensor: + channel_dim = 0 if self.dim() < 2 else 1 + if reduction == Reduction.MEAN.value: + grad_output = grad_output / total_weight + + target = target.unsqueeze(channel_dim) + safe_target = torch.where(target != ignore_index, target, 0) + grad_input = torch.zeros_like(self) + grad_input = torch.scatter(grad_input, channel_dim, safe_target, -1.0) + + if grad_input.dim() > grad_output.dim() > 0: + grad_output = grad_output.unsqueeze(channel_dim) + + if weight is not None: + new_shape = [1 for _ in range(self.dim())] + new_shape[channel_dim] = weight.shape[0] + weight = weight.reshape(new_shape) + grad_output = grad_output * weight + + grad_output = torch.where(target != ignore_index, grad_output, 0) + + return grad_input * grad_output + + +@register_decomposition(aten.glu_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def glu_backward(grad_output: Tensor, self: Tensor, dim: int) -> Tensor: + assert self.dim() > 0, "glu does not support 0-dimensional tensors" + wrap_dim = utils.canonicalize_dim(self.dim(), dim) + nIn = self.size(wrap_dim) + assert ( + nIn % 2 == 0 + ), f"Halving dimension must be even, but dimension {wrap_dim} is size {nIn}" + inputSize = nIn // 2 + firstHalf = self.narrow(wrap_dim, 0, inputSize) + secondHalf = self.narrow(wrap_dim, inputSize, inputSize) + gradInputFirstHalf = torch.sigmoid(secondHalf) + gradInputSecondHalf = ( + (1.0 - gradInputFirstHalf) * gradInputFirstHalf * firstHalf * grad_output + ) + gradInputFirstHalf = gradInputFirstHalf * grad_output + return torch.cat([gradInputFirstHalf, gradInputSecondHalf], dim=wrap_dim) + + +@register_decomposition(aten.nll_loss_backward) +@out_wrapper("grad_input") +def nll_loss_backward( + grad_output: Tensor, + self: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, + total_weight: Tensor, +) -> Tensor: + assert 0 <= self.dim() <= 2, "input tensor should be 1D or 2D" + assert ( + target.dim() <= 1 + ), "0D or 1D target tensor expected, multi-target not supported" + + no_batch_dim = self.dim() == 1 and target.dim() == 0 + assert no_batch_dim or ( + self.shape[0] == target.shape[0] + ), f"size mismatch (got input: {self.shape}, target: {target.shape})" + assert total_weight.numel() == 1, ( + "expected total_weight to be a single element tensor, got: ", + f"{total_weight.shape} ({total_weight.numel()} elements)", + ) + + assert ( + weight is None or weight.numel() == self.shape[-1] + ), "weight tensor should be defined either for all or no classes" + + if reduction == Reduction.NONE.value and self.dim() == 2: + assert grad_output.dim() == 1 and grad_output.shape[0] == self.shape[0], ( + f"Expected a 
tensor of dimension 1 and tensor.size[0] == {self.shape[0]} but " + f"got: dimension {grad_output.dim()} and tensor.size[0] == {grad_output.shape[0]}" + ) + else: + assert ( + grad_output.dim() <= 1 and grad_output.numel() == 1 + ), f"Expected a single element grad_output tensor, but got: {grad_output.shape}" + + return _nll_loss_backward( + grad_output, self, target, weight, reduction, ignore_index, total_weight + ) + + +@register_decomposition(aten.nll_loss2d_backward) +@out_wrapper("grad_input") +def nll_loss2d_backward( + grad_output: Tensor, + self: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, + total_weight: Tensor, +) -> Tensor: + assert ( + self.dim() == 4 + ), f"only batches of spatial inputs supported (4D tensors), but got input of dimension: {self.dim()}" + + assert ( + target.dim() == 3 + ), f"only batches of spatial targets supported (3D tensors) but got targets of dimension: {target.dim()}" + + assert ( + self.shape[0] == target.shape[0] + and self.shape[2] == target.shape[1] + and self.shape[3] == target.shape[2] + ), f"size mismatch (got input: {self.shape}, target: {target.shape}" + + assert total_weight.numel() == 1, ( + "expected total_weight to be a single element tensor, " + f"got: {total_weight.shape} ( {total_weight.numel()}, elements)" + ) + + return _nll_loss_backward( + grad_output, self, target, weight, reduction, ignore_index, total_weight + ) + + +@register_decomposition(aten.binary_cross_entropy) +@out_wrapper() +@pw_cast_for_opmath +def binary_cross_entropy( + self: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + reduction: int = Reduction.MEAN.value, +) -> Tensor: + # We cannot currently model this without introducing data-dependent control flow + # TORCH_CHECK( + # (input_val >= 0) && (input_val <= 1), + # "all elements of input should be between 0 and 1" + # ) + loss = (target - 1) * torch.maximum( + torch.log1p(-self), self.new_full((), -100) + ) - target * torch.maximum(torch.log(self), self.new_full((), -100)) + if weight is not None: + loss = loss * weight + return apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.binary_cross_entropy_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def binary_cross_entropy_backward( + grad_output: Tensor, + self: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + reduction: int = Reduction.MEAN.value, +) -> Tensor: + EPSILON = 1e-12 + result = grad_output * (self - target) / torch.clamp(self * (1 - self), min=EPSILON) + if weight is not None: + result = result * weight + if reduction == Reduction.MEAN.value: + result = result / self.numel() + return result + + +@register_decomposition(aten.soft_margin_loss) +@out_wrapper() +@pw_cast_for_opmath +def soft_margin_loss( + input: Tensor, + target: Tensor, + reduction: int = Reduction.MEAN.value, +) -> Tensor: + loss = torch.log1p(torch.exp(-input * target)) + return apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.soft_margin_loss_backward) +@out_wrapper("grad_input") +@pw_cast_for_opmath +def soft_margin_loss_backward( + grad_output: Tensor, + self: Tensor, + target: Tensor, + reduction: int = Reduction.MEAN.value, +) -> Tensor: + grad_input = target * grad_output * (torch.sigmoid(target * self) - 1) + if reduction == Reduction.MEAN.value: + grad_input = grad_input / self.numel() + return grad_input + + +@register_decomposition(aten.dist) +@out_wrapper() +def dist(input: Tensor, other: Tensor, p: float = 2): + return aten.norm(input - other, 
p=p) + + +@register_decomposition(aten._euclidean_dist) +@out_wrapper() +def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: + x1_norm = x1.pow(2).sum(-1, True) + x1_pad = torch.ones_like(x1_norm, memory_format=torch.contiguous_format) + x2_norm = x2.pow(2).sum(-1, True) + x2_pad = torch.ones_like(x2_norm, memory_format=torch.contiguous_format) + x1_ = torch.cat([x1.mul(-2), x1_norm, x1_pad], -1) + x2_ = torch.cat([x2, x2_pad, x2_norm], -1) + result = x1_.matmul(x2_.mT) + return result.clamp_min(0).sqrt() + + +@register_decomposition(aten.slice_backward) +@out_wrapper() +def slice_backward( + grad_output: Tensor, + input_sizes: List[int], + dim: int, + start: int, + end: int, + step: int, +): + grad_input = grad_output.new_zeros(input_sizes) + return torch.slice_scatter(grad_input, grad_output, dim, start, end, step) + + +@register_decomposition(aten.slice.Tensor) +def slice_forward( + # Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1 + self: Tensor, + dim: int = 0, + start: Optional[int] = None, + end: Optional[int] = None, + step: int = 1, +): + ndim = self.dim() + if ndim == 0: + raise RuntimeError("slice() cannot be applied to a 0-dim tensor.") + dim = utils.canonicalize_dim(self.dim(), dim) + sizes = list(self.size()) + strides = list(self.stride()) + + if step <= 0: + raise RuntimeError("slice step must be positive") + + start_val = start if start is not None else 0 + end_val = end if end is not None else sys.maxsize # 2^63 – 1 + + if start_val < 0: + start_val += sizes[dim] + + if end_val < 0: + end_val += sizes[dim] + + if start_val < 0: + start_val = 0 + elif start_val > sizes[dim]: + start_val = sizes[dim] + + if end_val < start_val: + end_val = start_val + elif end_val > sizes[dim]: + end_val = sizes[dim] + + storage_offset = self.storage_offset() + start_val * strides[dim] + len = end_val - start_val + sizes[dim] = (len + step - 1) // step + strides[dim] *= step + + if self.is_quantized: + raise NotImplementedError( + "Slice decomposition for quantized tensors aren't implemented" + ) + else: + return self.as_strided(sizes, strides, storage_offset) + + +@register_decomposition(aten.select_backward) +@out_wrapper() +def select_backward(grad_output: Tensor, input_sizes: List[int], dim: int, index: int): + grad_input = grad_output.new_zeros(input_sizes) + return torch.select_scatter(grad_input, grad_output, dim, index) + + +@register_decomposition(aten.diagonal_backward) +@out_wrapper() +def diagonal_backward( + grad_output: Tensor, input_sizes: List[int], offset: int, dim1: int, dim2: int +): + grad_input = grad_output.new_zeros(input_sizes) + return torch.diagonal_scatter(grad_input, grad_output, offset, dim1, dim2) + + +def _cast_grad_to_input_dtype( + grad_output: Tensor, grad_input: Tensor, input_dtype: torch.dtype +): + if grad_output.dtype != input_dtype: + grad_input = grad_input.to(input_dtype) + return grad_input + + +@register_decomposition(aten._softmax_backward_data) +@out_wrapper("grad_input") +@compute_only_pw_cast_for_opmath +def _softmax_backward_data( + grad_output: Tensor, output: Tensor, dim: int, input_dtype: torch.dtype +): + new_grad_output = grad_output * output + grad_input = new_grad_output - output * torch.sum( + new_grad_output, dim=dim, keepdim=True + ) + + # CPU kernel doesn't respect input_dtype, but following check doesn't work for meta tensor + # if grad_output.device == torch.device("cpu"): + # return grad_input.contiguous() + + return _cast_grad_to_input_dtype(grad_output, grad_input, input_dtype).contiguous() + + 
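In practice these backward decompositions are consumed by a tracer rather than called by hand. A short sketch, not part of the patched file, of how a table produced by get_decompositions is applied during tracing with make_fx (only public PyTorch APIs are used; the op choice and shapes are arbitrary):

import torch
from torch._decomp import get_decompositions
from torch.fx.experimental.proxy_tensor import make_fx

decomps = get_decompositions([torch.ops.aten._softmax_backward_data])

def f(grad_out, out):
    return torch.ops.aten._softmax_backward_data(grad_out, out, -1, torch.float32)

gm = make_fx(f, decomposition_table=decomps)(torch.randn(2, 4), torch.randn(2, 4))
# The traced graph now contains the mul/sum/sub from the decomposition above
# rather than a single _softmax_backward_data node.
print(gm.graph)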
+@register_decomposition(aten._log_softmax_backward_data) +@out_wrapper() +@compute_only_pw_cast_for_opmath +def _log_softmax_backward_data( + grad_output: Tensor, output: Tensor, dim: int, input_dtype: torch.dtype +): + grad_input = grad_output - torch.exp(output) * torch.sum( + grad_output, dim=dim, keepdim=True + ) + return _cast_grad_to_input_dtype(grad_output, grad_input, input_dtype) + + +def _im2col_col2im_indices_along_dim( + input_d, kernel_d, dilation_d, padding_d, stride_d, device +): + """Utility function to implement im2col and col2im""" + blocks_d = input_d + padding_d * 2 - dilation_d * (kernel_d - 1) + + arange_kw = partial(torch.arange, dtype=torch.int64, device=device) + + # Stride kernel over input and find starting indices along dim d + blocks_d_indices = arange_kw(0, blocks_d, stride_d).unsqueeze(0) + + # Apply dilation on kernel and find its indices along dim d + kernel_grid = arange_kw(0, kernel_d * dilation_d, dilation_d).unsqueeze(-1) + + # Broadcast and add kernel starting positions (indices) with + # kernel_grid along dim d, to get block indices along dim d + return blocks_d_indices + kernel_grid + + +@register_decomposition(aten.im2col) +@out_wrapper() +@pw_cast_for_opmath +def im2col( + input: Tensor, + kernel_size: List[int], + dilation: List[int], + padding: List[int], + stride: List[int], +) -> Tensor: + torch._check(len(kernel_size) == 2, lambda: "im2col(): only 2D kernel supported") + torch._check(len(dilation) == 2, lambda: "im2col(): only 2D dilation supported") + torch._check(len(padding) == 2, lambda: "im2col(): only 2D padding supported") + torch._check(len(stride) == 2, lambda: "im2col(): only 2D stride supported") + + def check_positive(param, param_name, strict=True): + cond = all(p > 0 for p in param) if strict else all(p >= 0 for p in param) + torch._check( + cond, lambda: "{param_name} should be greater {'than' zero, but got {param}" + ) + + check_positive(kernel_size, "kernel_size") + check_positive(dilation, "dilation") + check_positive(dilation, "padding", strict=False) + check_positive(stride, "stride") + + shape = input.shape + ndim = len(shape) + torch._check( + ndim in (3, 4) and all(d != 0 for d in shape[-3:]), + lambda: "Expected 3D or 4D (batch mode) tensor for input with possible 0 batch size " + f"and non-zero dimensions, but got: {tuple(shape)}", + ) + output_size = tuple( + 1 + (out + 2 * pad - dil * (ker - 1) - 1) // st + for out, pad, dil, ker, st in zip( + shape[-2:], padding, dilation, kernel_size, stride + ) + ) + torch._check( + all(c > 0 for c in output_size), + lambda: f"Given an input with spacial size {tuple(shape[-2:])}, " + f"kernel_size={kernel_size}, dilation={dilation}, " + f"padding={padding}, stride={stride}, " + "the calculated shape of the array of sliding blocks " + f"is {output_size}, but its components must be at least one.", + ) + batched_input = ndim == 4 + if not batched_input: + input = input.unsqueeze(0) + + batch_dim, channel_dim, input_h, input_w = input.shape + + stride_h, stride_w = stride + padding_h, padding_w = padding + dilation_h, dilation_w = dilation + kernel_h, kernel_w = kernel_size + + blocks_row_indices = _im2col_col2im_indices_along_dim( + input_h, kernel_h, dilation_h, padding_h, stride_h, input.device + ) + blocks_col_indices = _im2col_col2im_indices_along_dim( + input_w, kernel_w, dilation_w, padding_w, stride_w, input.device + ) + + # Note that F.pad takes (padding_left, padding_right, padding_top, padding_bottom) + # ugh + padded_input = F.pad(input, (padding_w, padding_w, padding_h, 
padding_h)) + + blocks_row_indices = blocks_row_indices.unsqueeze(-1).unsqueeze(-1) + output = padded_input[:, :, blocks_row_indices, blocks_col_indices] + output = output.permute(0, 1, 2, 4, 3, 5) + num_blocks_row = blocks_row_indices.size(1) + num_blocks_col = blocks_col_indices.size(1) + output = output.reshape( + batch_dim, channel_dim * kernel_h * kernel_w, num_blocks_row * num_blocks_col + ) + + if not batched_input: + output = output.squeeze(0) + return output + + +@register_decomposition(aten.col2im) +@out_wrapper() +@pw_cast_for_opmath +def col2im( + input: Tensor, + output_size: List[int], + kernel_size: List[int], + dilation: List[int], + padding: List[int], + stride: List[int], +) -> Tensor: + torch._check(len(output_size) == 2, lambda: "only 2D output_size supported") + torch._check(len(kernel_size) == 2, lambda: "only 2D kernel supported") + torch._check(len(dilation) == 2, lambda: "only 2D dilation supported") + torch._check(len(padding) == 2, lambda: "only 2D padding supported") + torch._check(len(stride) == 2, lambda: "only 2D stride supported") + + def check_positive(param, param_name, strict=True): + cond = all(p > 0 for p in param) if strict else all(p >= 0 for p in param) + torch._check( + cond, lambda: "{param_name} should be greater than zero, but got {param}" + ) + + check_positive(kernel_size, "kernel_size") + check_positive(dilation, "dilation") + check_positive(padding, "padding", strict=False) + check_positive(stride, "stride") + check_positive(output_size, "output_size") + + shape = input.shape + ndim = len(shape) + torch._check( + ndim in (2, 3) and all(d != 0 for d in shape[-2:]), + lambda: "Expected 2D or 3D (batch mode) tensor for input with possible 0 batch size " + f"and non-zero dimensions, but got: {tuple(shape)}", + ) + prod_kernel_size = kernel_size[0] * kernel_size[1] + torch._check( + shape[-2] % prod_kernel_size == 0, + lambda: "Expected size of input's first non-batch dimension to be divisible by the " + f"product of kernel_size, but got input.shape[-2] = {shape[-2]} and " + f"kernel_size={kernel_size}", + ) + col = [ + 1 + (out + 2 * pad - dil * (ker - 1) - 1) // st + for out, pad, dil, ker, st in zip( + output_size, padding, dilation, kernel_size, stride + ) + ] + L = col[0] * col[1] + torch._check( + shape[-1] == L, + lambda: f"Given output_size={output_size}, kernel_size={kernel_size}, " + f"dilation={dilation}, padding={padding}, stride={stride}, " + f"expected input.size(-1) to be {L} but got {shape[-1]}.", + ) + torch._check( + L > 0, + lambda: f"Given output_size={output_size}, kernel_size={kernel_size}, " + f"dilation={dilation}, padding={padding}, stride={stride}, " + f"expected input.size(-1) to be {L} but got {shape[-1]}.", + ) + batched_input = ndim == 3 + if not batched_input: + input = input.unsqueeze(0) + + shape = input.shape + + out_h, out_w = output_size + stride_h, stride_w = stride + padding_h, padding_w = padding + dilation_h, dilation_w = dilation + kernel_h, kernel_w = kernel_size + + # col2im is defined as the backwards of im2col, so we differentiate its decomposition by hand + input = input.reshape([shape[0], shape[1] // prod_kernel_size] + kernel_size + col) + input = input.permute(0, 1, 2, 4, 3, 5) + + indices_row = _im2col_col2im_indices_along_dim( + out_h, kernel_h, dilation_h, padding_h, stride_h, input.device + ) + indices_row = _unsqueeze_to_dim(indices_row, 4) + indices_col = _im2col_col2im_indices_along_dim( + out_w, kernel_w, dilation_w, padding_w, stride_w, input.device + ) + + output_padded_size = [o + 2 * p 
for o, p in zip(output_size, padding)] + output = input.new_zeros( + [shape[0], shape[1] // prod(kernel_size)] + output_padded_size + ) + idx = (None, None, indices_row, indices_col) + output = aten._unsafe_index_put(output, idx, input, accumulate=True) + output = F.pad(output, (-padding_w, -padding_w, -padding_h, -padding_h)) + + if not batched_input: + output = output.squeeze(0) + return output + + +@register_decomposition(aten.native_dropout_backward) +@out_wrapper() +def native_dropout_backward(grad_output: Tensor, mask: Tensor, scale: float): + # According to the CUDA kernel implementation we should have this test; + # but it seems to fail tests! + # torch._check(mask.dtype == torch.bool, lambda: f"Mask should be Bool Scalar Type {mask.dtype}") + + # Mimicking CUDA kernel's behavior for output stride: output follow input's memory format + # This different from TensorIterator's behavior + r = (grad_output * (mask.type_as(grad_output) * scale)).clone( + memory_format=utils.suggest_memory_format(grad_output) + ) + return r + + +@register_decomposition(aten.unfold_backward) +@out_wrapper() +def unfold_backward( + grad: Tensor, input_size: List[int], dimension: int, size: int, step: int +) -> Tensor: + if len(input_size) == 0: + return torch.squeeze_copy(grad, 0) + dim = utils.canonicalize_dim(len(input_size), dimension) + idx = torch.arange(input_size[dim], device=grad.device, dtype=torch.int32) + idx = idx.unfold(0, size, step).flatten() + grad = grad.movedim(-1, dim + 1).flatten(dim, dim + 1) + # nb. At the moment this generates two kernels in triton + # It could potentially be fused into one call to scatter_reduce, + # in the case step <= size provided scatter_reduce generates 1 kernel + grad_input = grad.new_zeros(input_size) + index = (None,) * dim + (idx,) + return aten._unsafe_index_put(grad_input, index, grad, accumulate=True).contiguous() + + +@register_decomposition(aten.logit_backward.default) +@pw_cast_for_opmath +def logit_backward( + grad_output: Tensor, self: Tensor, eps: Optional[float] = None +) -> Tensor: + if eps is not None: + lo = eps + hi = 1.0 - lo + return torch.where( + torch.logical_and(self >= lo, self <= hi), + grad_output / (self * (1.0 - self)), + 0.0, + ) + else: + return torch.where( + torch.logical_and(self >= 0.0, self <= 1.0), + grad_output / (self * (1.0 - self)), + self.new_full((), float("nan")), + ) + + +@register_decomposition(aten.dropout) +@aten.dropout.default.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.dropout.default.py_impl(DispatchKey.Autograd) +def dropout(input: Tensor, p: float, train: Optional[bool]): + if train and p != 0: + return aten.native_dropout(input, p, train)[0] + else: + return input.clone() + + +@register_decomposition(aten.native_dropout) +@out_wrapper("out0", "out1") +def native_dropout(input: Tensor, p: float, train: Optional[bool]): + if train and p != 0: + if p == 1: + return (torch.zeros_like(input), torch.zeros_like(input, dtype=torch.bool)) + if not input.dtype.is_floating_point: + raise RuntimeError( + "result type Float can't be cast to the desired output type Long" + ) + bool_mask = torch.rand_like(input) > p + res = bool_mask * input * float(1.0 / (1.0 - p)) + return (res, bool_mask) + else: + return (input, torch.ones_like(input, dtype=torch.bool)) + + +@register_decomposition(aten._softmax) +@out_wrapper() +def _softmax(x: Tensor, dim: int, half_to_float: bool): + # eager softmax returns a contiguous tensor. Ensure that decomp also returns + # a contiguous tensor. 
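+    # Subtracting the per-`dim` max below is purely for numerical stability:
+    # softmax is invariant to a constant shift along `dim`, and keeping the
+    # exp() arguments non-positive avoids overflow for large inputs.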
+ x = x.contiguous() + if half_to_float: + assert x.dtype == torch.half + computation_dtype, result_dtype = utils.elementwise_dtypes( + x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + x = x.to(computation_dtype) + if x.numel() == 0: + unnormalized = torch.exp(x) + else: + x_max = torch.amax(x, dim, keepdim=True) + unnormalized = torch.exp(x - x_max) + result = unnormalized / torch.sum(unnormalized, dim, keepdim=True) + if not half_to_float: + result = result.to(result_dtype) + return result + + +@register_decomposition(aten._log_softmax) +@out_wrapper() +def _log_softmax(x: Tensor, dim: int, half_to_float: bool): + # eager log_softmax returns a contiguous tensor. Ensure that decomp also + # returns a contiguous tensor. + x = x.contiguous() + if half_to_float: + assert x.dtype == torch.half + computation_dtype, result_dtype = utils.elementwise_dtypes( + x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + x = x.to(computation_dtype) + if x.numel() == 0: + shifted = x + else: + x_max = torch.amax(x, dim, keepdim=True) + shifted = x - x_max + shifted_logsumexp = torch.log(torch.sum(torch.exp(shifted), dim, keepdim=True)) + result = shifted - shifted_logsumexp + if not half_to_float: + result = result.to(result_dtype) + return result + + +@register_decomposition(aten.rsub.Tensor) +def rsub_Tensor(self: Tensor, other: Tensor, alpha: float = 1) -> Tensor: + return torch.sub(other, self, alpha=alpha) + + +@register_decomposition(aten.rsub.Scalar) +def rsub_Scalar(self: Tensor, other: float, alpha: float = 1) -> Tensor: + return torch.sub(other, self, alpha=alpha) + + +@register_decomposition(aten.embedding) +@out_wrapper() +def embedding( + weight: Tensor, + indices: Tensor, + padding_idx: int = -1, + scale_grad_by_freq: bool = False, + sparse: bool = False, +) -> Tensor: + assert weight.dim() == 2, "'weight' must be 2-D" + # Nb. 
scale_grad_by_freq is not used in the forward + if indices.ndim <= 1: + # We need this one as weight[indices] calls item() in these cases + out = weight.index_select(0, indices) + if indices.ndim == 0: + out = out.squeeze(0) + return out + else: + return weight[indices] + + +@register_decomposition(aten.embedding_dense_backward) +@out_wrapper() +def embedding_dense_backward( + grad_output: Tensor, + indices: Tensor, + num_weights: int, + padding_idx: int, + scale_grad_by_freq: bool, +): + computation_dtype, result_dtype = utils.elementwise_dtypes( + grad_output, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT + ) + grad_output = grad_output.to(computation_dtype) + indices = _maybe_convert_to_dtype(indices, torch.long) # type: ignore[assignment] + if scale_grad_by_freq: + counts = indices.new_zeros((num_weights,)) + ones = torch.ones_like(indices) + counts = aten._unsafe_index_put(counts, [indices], ones, accumulate=True) + grad_weights_scale = counts[indices] + grad_output = grad_output / grad_weights_scale.unsqueeze(-1) + + mask = _unsqueeze_to_dim(indices == padding_idx, grad_output.ndim) + grad = grad_output.masked_fill(mask, 0) + grad_weight = grad_output.new_zeros( + (num_weights,) + grad_output.shape[indices.ndim :] + ) + return aten._unsafe_index_put(grad_weight, [indices], grad, accumulate=True).to( + result_dtype + ) + + +def prod(x: List[int]): + r = 1 + for i in x: + r *= i + return r + + +@register_decomposition(aten.split_with_sizes) +def split_with_sizes( + self: Tensor, split_sizes: List[int], dim: int = 0 +) -> List[Tensor]: + torch._check_with( + ValueError, + sum(split_sizes) == self.shape[dim], + lambda: f"Split sizes add up to {sum(split_sizes)} but got the tensor's size of {self.shape[dim]}", + ) + num_splits = len(split_sizes) + splits = [] + start_idx = 0 + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import expect_true + + for i in range(num_splits): + length = split_sizes[i] + torch._check_is_size( + length, + lambda: "split_with_sizes expects split_sizes have only non-negative entries", + ) + # We know this is true thanks to the sum, but this assertion helps + # out our internal reasoning + expect_true(start_idx + length <= self.shape[dim]) + splits.append(self.narrow(dim, start_idx, length)) + start_idx += length + return splits + + +@register_decomposition(aten.unsafe_split.Tensor) +def unsafe_split(input: Tensor, split_size: int, dim: int = 0) -> Tuple[Tensor, ...]: + return aten.split.Tensor(input, split_size, dim) + + +@register_decomposition(aten.unsafe_split_with_sizes.default) +def unsafe_split_with_sizes( + input: Tensor, split_sizes: List[int], dim: int = 0 +) -> Tuple[Tensor, ...]: + return aten.split_with_sizes.default(input, split_sizes, dim) + + +@register_decomposition(aten.split.Tensor) +def split(self: Tensor, split_size: int, dim: int = 0) -> Tuple[Tensor, ...]: + input_sizes = self.shape + dim_size = input_sizes[dim] + if split_size == 0: + assert dim_size == 0 + return (self,) + chunks = (dim_size + split_size - 1) // split_size + + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import guard_int + + chunks = guard_int(chunks) + split_sizes = [split_size for i in range(chunks)] + split_sizes[-1] = split_size - (split_size * chunks - dim_size) + return torch.split(self, split_sizes, dim) + + +@aten.tensor_split.tensor_indices_or_sections.py_impl( + DispatchKey.CompositeImplicitAutograd +) +def tensor_split_tensor_indices_or_sections_py_impl( + 
self: Tensor, + tensor_indices_or_sections: Tensor, + dim: int = 0, +) -> List[Tensor]: + assert tensor_indices_or_sections.device.type == "cpu" + assert tensor_indices_or_sections.dtype == torch.int64 + split_dim = tensor_indices_or_sections.dim() + torch._check( + split_dim == 1 or split_dim == 0, + lambda: "tensor_split expected tensor_indices_or_sections to be a zero-dimensional " + f"or one-dimensional tensor, but got a tensor with {split_dim} dims", + ) + if split_dim == 0: + sections = tensor_indices_or_sections.item() + assert isinstance(sections, IntLike) + return self.tensor_split(sections, dim) + else: + indices = [i.item() for i in tensor_indices_or_sections] + return self.tensor_split(indices, dim) + + +# TODO: this doesn't appear to have enough precision in bfloat16 +@register_decomposition(aten.addmm) +@out_wrapper() +@pw_cast_for_opmath +def addmm(self: Tensor, mat1: Tensor, mat2: Tensor, beta: int = 1, alpha: int = 1): + if not self.is_floating_point() and not self.is_complex(): + beta = int(beta) + alpha = int(alpha) + out = alpha * torch.mm(mat1, mat2) + if beta == 0: + return out + + # The output of aten.addmm is contiguous, we need to match this behavior in the decomposition. + # The original implementation 'beta * self + out' would return a strided tensor if `self` is strided. + # We thus use `out`, the output of torch.mm, which is always contiguous, as the first argument for addition. + # This is relying on TensorIterator's behavior that it takes higher precedence on the stride of first input. + # Alternative, we can write `(beta * self + out).contiguous()`, but it introduces another copy in some cases. + # This implementation is not ideal, and we should revisit this when we have a better solution. + return out + beta * self + + +@register_decomposition(aten._addmm_activation) +@out_wrapper() +@pw_cast_for_opmath +def _addmm_activation( + self: Tensor, + mat1: Tensor, + mat2: Tensor, + beta: int = 1, + alpha: int = 1, + use_gelu: bool = False, +): + out = addmm(self, mat1, mat2, beta, alpha) + if use_gelu: + if self.is_cuda: + return aten.gelu(out, approximate="tanh") + else: + return aten.gelu(out) + return aten.relu(out) + + +@register_decomposition(aten.addmv) +@out_wrapper() +@pw_cast_for_opmath +def addmv(self: Tensor, mat1: Tensor, vec: Tensor, beta: int = 1, alpha: int = 1): + if not self.is_floating_point() and not self.is_complex(): + beta = int(beta) + alpha = int(alpha) + out = alpha * torch.mv(mat1, vec) + if beta == 0: + return out + return out + beta * self + + +@register_decomposition(aten.native_group_norm_backward.default) +@pw_cast_for_opmath +def native_group_norm_backward( + grad_output: Tensor, + input: Tensor, + mean: Tensor, + rstd: Tensor, + gamma: Optional[Tensor], + N: int, + C: int, + HxW: int, + group: int, + output_mask: List[bool], +) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + utils.check_same_device( + grad_output, input, mean, rstd, allow_cpu_scalar_tensors=False + ) + utils.check_same_shape(input, grad_output, allow_cpu_scalar_tensors=False) + utils.check_same_shape(mean, rstd, allow_cpu_scalar_tensors=False) + torch._check( + input.numel() == N * C * HxW, + lambda: f"Expect input to have { N * C * HxW} elements", + ) + torch._check( + mean.shape == (N, group), + lambda: f"Expect mean to have shape ({N}, {group}, but got {mean.shape}", + ) + torch._check( + gamma is None or gamma.numel() == C, + lambda: f"Expect gamma to have {C} elements but got {gamma.numel() if gamma is not None else -1}", + ) + + cpg, _rem = 
divmod(C, group) + torch._check( + _rem == 0, + lambda: f"Expect number of channels {C} to be evenly-divisible by number of groups {group}", + ) + + # Compute Internal gradients + ds = torch.mul(grad_output, input).view(N, C, HxW).sum(dim=[2]) + db = grad_output.view(N, C, HxW).sum(dim=[2]) + + d_input: Optional[Tensor] = None + d_gamma: Optional[Tensor] = None + d_bias: Optional[Tensor] = None + if output_mask[0]: + s = 1.0 / (HxW * cpg) + if gamma is not None: + ds_val = torch.mul(ds, gamma.unsqueeze(0)).reshape(N, group, cpg).sum(2) + db_val = torch.mul(db, gamma.unsqueeze(0)).reshape(N, group, cpg).sum(2) + c1 = torch.mul( + rstd.unsqueeze(-1), + gamma.reshape(1, group, cpg), + ) + else: + ds_val = ds.reshape(N, group, cpg).sum(2) + db_val = db.reshape(N, group, cpg).sum(2) + c1 = torch.mul( + rstd.unsqueeze(-1), + torch.ones((1, group, cpg), device=rstd.device), + ) + c2 = (db_val * mean - ds_val) * rstd * rstd * rstd * s + c3 = -c2 * mean - db_val * rstd * s + + c1 = c1.unsqueeze(-1) + c2 = _unsqueeze_to_dim(c2, 4) + c3 = _unsqueeze_to_dim(c3, 4) + d_input = ( + torch.mul(grad_output.reshape(N, group, cpg, HxW), c1) + + torch.mul(input.reshape(N, group, cpg, HxW), c2) + + c3 + ) + d_input = d_input.reshape(input.shape).to(input.dtype) + if output_mask[1]: + d_gamma = ( + ( + (ds.view(N, group, cpg) - db.view(N, group, cpg) * mean.unsqueeze(-1)) + * rstd.unsqueeze(-1) + ) + .sum(dim=[0]) + .reshape(C) + ) + if output_mask[2]: + d_bias = db.sum(dim=[0]) + + return (d_input, d_gamma, d_bias) + + +# out_wrapper currently does not allow optional outputs +@register_decomposition(aten.native_group_norm_backward.out) +def native_group_norm_backward_out( + grad_output: Tensor, + input: Tensor, + mean: Tensor, + rstd: Tensor, + gamma: Optional[Tensor], + N: int, + C: int, + HxW: int, + group: int, + output_mask: List[bool], + *, + out0: torch.Tensor, + out1: torch.Tensor, + out2: torch.Tensor, +) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + result = native_group_norm_backward( + grad_output, input, mean, rstd, gamma, N, C, HxW, group, output_mask + ) + grad_input = (out0, out1, out2) + for i, r in enumerate(result): + if r is not None: + _maybe_resize_out(grad_input[i], r.shape) + _safe_copy_out(copy_from=r, copy_to=grad_input[i], exact_dtype=True) + + return grad_input + + +def _maybe_cast(x: Optional[Tensor], dtype) -> Optional[Tensor]: + if x is not None: + return x.to(dtype) + return x + + +# TODO: Take a closer look at the type promotion semantics +@register_decomposition(aten.native_layer_norm_backward.default) +def native_layer_norm_backward( + grad_out: Tensor, + input: Tensor, + normalized_shape: List[int], + mean: Tensor, + rstd: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + output_mask: List[bool], +) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + input_shape = input.shape + input_ndim = input.dim() + computation_dtype = utils.get_computation_dtype(input.dtype) + grad_out_cast, input_cast, weight_cast, bias_cast = ( + x.to(computation_dtype).contiguous() if x is not None else x + for x in (grad_out, input, weight, bias) + ) + assert grad_out_cast is not None + + axis = input_ndim - len(normalized_shape) + inner_dims = input_shape[axis:] + outer_dims = input_shape[:axis] + inner_dim_indices: List[int] = [] + outer_dim_indices: List[int] = [] + for i in range(input_ndim): + if i >= axis: + inner_dim_indices.append(i) + else: + outer_dim_indices.append(i) + + N = prod(inner_dims) # type: ignore[arg-type] + M = prod(outer_dims) # 
type: ignore[arg-type] + if M <= 0 or N <= 0: + return ( + input.new_zeros(input_shape) if output_mask[0] else None, + input.new_zeros(input_shape[axis:]) if output_mask[1] else None, + input.new_zeros(input_shape[axis:]) if output_mask[2] else None, + ) + mean = _unsqueeze_to_dim(mean, input_cast.dim()) # type: ignore[union-attr] + rstd = _unsqueeze_to_dim(rstd, input_cast.dim()) # type: ignore[union-attr] + x_hat = (input_cast - mean) * rstd + if weight_cast is not None: + grad_x_hat = grad_out_cast * weight_cast + else: + grad_x_hat = grad_out_cast + a = grad_x_hat * N + b = torch.sum(grad_x_hat, inner_dim_indices, True) + c1 = torch.mul(grad_x_hat, x_hat) + c2 = torch.sum(c1, inner_dim_indices, True) + c3 = torch.mul(x_hat, c2) + + inner = a - b - c3 + d_input: Optional[Tensor] = None + d_weight: Optional[Tensor] = None + d_bias: Optional[Tensor] = None + if output_mask[0]: + d_input = (rstd / N) * inner + + if output_mask[1] and weight_cast is not None: + if len(outer_dim_indices) > 0: + d_weight = torch.sum(grad_out_cast * x_hat, outer_dim_indices, False) + else: + d_weight = grad_out_cast * x_hat + + if output_mask[2] and bias_cast is not None: + if len(outer_dim_indices) > 0: + d_bias = torch.sum(grad_out_cast, outer_dim_indices, False) + else: + d_bias = grad_out_cast.clone() + + return ( + _maybe_cast(d_input, input.dtype), + _maybe_cast(d_weight, input.dtype), + _maybe_cast(d_bias, input.dtype), + ) + + +# out_wrapper currently does not allow optional outputs +@register_decomposition(aten.native_layer_norm_backward.out) +def native_layer_norm_backward_out( + grad_out: Tensor, + input: Tensor, + normalized_shape: List[int], + mean: Tensor, + rstd: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + output_mask: List[bool], + *, + out0: torch.Tensor, + out1: torch.Tensor, + out2: torch.Tensor, +) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + result = native_layer_norm_backward( + grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask + ) + grad_input = (out0, out1, out2) + for i, r in enumerate(result): + if r is not None: + _maybe_resize_out(grad_input[i], r.shape) + _safe_copy_out(copy_from=r, copy_to=grad_input[i], exact_dtype=True) + + return grad_input + + +def native_batch_norm_helper( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + training: bool, + momentum: float, + eps: float, + functional: bool, +) -> Tuple[Tensor, Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: + reduction_dims = [0] + list(range(2, input.dim())) + computation_dtype = utils.get_computation_dtype(input.dtype) + new_running_mean = running_mean + new_running_var = running_var + if training: + computation_dtype = utils.get_computation_dtype(input.dtype) + input_acc = input.to(dtype=computation_dtype) + biased_var, mean = torch.var_mean( + input_acc, dim=reduction_dims, correction=0, keepdim=True + ) + rstd = torch.rsqrt(biased_var + eps) + + output = (input - mean) * rstd + + save_mean = torch.squeeze(mean, reduction_dims) + save_rstd = torch.squeeze(rstd, reduction_dims) + if running_mean is not None: + new_running_mean = momentum * save_mean + (1 - momentum) * running_mean + if not functional: + running_mean.copy_(new_running_mean) + if running_var is not None: + n = input.numel() / input.shape[1] + # This doesn't strictly match eager's numerics, which accumulates var sum and then directly applies the correction + # But... 
that would require re-implementing var here, for negligible numerics gain on a tensor whose + # numerics probably don't matter. + squeezed_var = torch.squeeze(biased_var, reduction_dims) + unbiased_var = squeezed_var * (n / (n - 1)) + new_running_var = momentum * unbiased_var + (1 - momentum) * running_var + if not functional: + running_var.copy_(new_running_var) + else: + assert running_mean is not None and running_var is not None + running_mean = running_mean.to(dtype=computation_dtype, copy=True) + new_running_mean = running_mean + running_var = running_var.to(dtype=computation_dtype, copy=True) + new_running_var = running_var + mean = running_mean + invstd = 1 / (torch.sqrt(running_var + eps)) + # Very annoying inconsistency where CPU and CUDA give different shapes + if input.device.type != "cpu": + save_mean = running_mean + save_rstd = invstd + else: + save_mean = input.new_zeros((0,)) + save_rstd = input.new_zeros((0,)) + mean = _unsqueeze_to_dim(mean, input.dim() - 1) + invstd = _unsqueeze_to_dim(invstd, input.dim() - 1) + output = (input - mean) * invstd + + if weight is not None: + weight = weight.flatten() + weight = _unsqueeze_to_dim(weight, input.dim() - 1) + output = output * weight + + if bias is not None: + bias = bias.flatten() + bias = _unsqueeze_to_dim(bias, input.dim() - 1) + output = output + bias + + if input.device.type == "cpu": + save_mean = save_mean.to(dtype=input.dtype) + save_rstd = save_rstd.to(dtype=input.dtype) + return ( + output.to(dtype=input.dtype), + save_mean, + save_rstd, + new_running_mean, + new_running_var, + ) + + +@register_decomposition(aten.native_batch_norm) +@out_wrapper("out", "save_mean", "save_invstd") +def native_batch_norm( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + training: bool, + momentum: float, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + output, save_mean, save_rstd, _, _ = native_batch_norm_helper( + input, weight, bias, running_mean, running_var, training, momentum, eps, False + ) + return output, save_mean, save_rstd + + +# TODO: this decomposition is NOT here to stay. We would much prefer replacing native_batch_norm +# with our new correctly schema'd _native_batch_norm_legit and its variants, but +# we cannot do that immediately in the C++ because it would be forwards incompatible +# with some mobile use cases. +# +# Since this change is most impactful for aot autograd/functionalization, we simply +# register this decomposition on the Autograd key for the python dispatcher (which is +# currently only used by aot autograd/functionalization and no one else, really). +# In two weeks or so, we should remove this decomposition and phase out the current native_batch_norm +# to be _native_batch_norm_legit and have the right schema (stating that there are input mutations). +@aten.native_batch_norm.default.py_impl(DispatchKey.Autograd) +@aten.native_batch_norm.default.py_impl(DispatchKey.CompositeImplicitAutograd) +def native_batch_norm_decomposition( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + training: bool, + momentum: float, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + if running_mean is None and running_var is None: + return aten._native_batch_norm_legit( + input, weight, bias, training, momentum, eps + ) + if running_mean is None: + raise RuntimeError( + "running_mean is None, but running_var is provided. 
" + "They should both be None or both be provided." + ) + if running_var is None: + raise RuntimeError( + "running_var is None, but running_mean is provided. " + "They should both be None or both be provided." + ) + if training: + # HACK: batch norm consolidation should clean this up so this op doesn't take in a training arg. + return aten._native_batch_norm_legit( + input, weight, bias, running_mean, running_var, training, momentum, eps + ) + else: + return aten._native_batch_norm_legit_no_training( + input, weight, bias, running_mean, running_var, momentum, eps + ) + + +@aten.unsafe_chunk.default.py_impl(DispatchKey.CompositeImplicitAutograd) +def unsafe_chunk_py_impl(tensor, chunks, dim=0) -> List[Tensor]: + dim_size = tensor.size(dim) + split_size = (dim_size + chunks - 1) // chunks + + if split_size == 0 and dim_size == 0: + split_sizes = [split_size for _ in chunks] + split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size) + return torch.ops.aten.unsafe_split_with_sizes.default(tensor, split_sizes, dim) + return torch.ops.aten.unsafe_split.Tensor(tensor, split_size, dim) + + +@register_decomposition(aten._native_batch_norm_legit_no_training.default) +def _native_batch_norm_legit_no_training( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + running_mean: Tensor, + running_var: Tensor, + momentum: float, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + return aten._native_batch_norm_legit.default( + input, + weight, + bias, + running_mean, + running_var, + False, # training + momentum, + eps, + ) + + +@register_decomposition(aten._native_batch_norm_legit.default) +def _native_batch_norm_legit( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + running_mean: Tensor, + running_var: Tensor, + training: bool, + momentum: float, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + output, save_mean, save_rstd, _, _ = native_batch_norm_helper( + input, weight, bias, running_mean, running_var, training, momentum, eps, False + ) + return output, save_mean, save_rstd + + +@register_decomposition(aten._native_batch_norm_legit.no_stats) +def _native_batch_norm_legit_no_stats( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + training: bool, + momentum: float, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + output, save_mean, save_rstd, _, _ = native_batch_norm_helper( + input, weight, bias, None, None, training, momentum, eps, False + ) + return output, save_mean, save_rstd + + +@register_decomposition(aten._native_batch_norm_legit_functional.default) +def _native_batch_norm_legit_functional( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + running_mean: Tensor, + running_var: Tensor, + training: bool, + momentum: float, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + ( + output, + save_mean, + save_rstd, + new_running_mean, + new_running_var, + ) = native_batch_norm_helper( + input, weight, bias, running_mean, running_var, training, momentum, eps, True + ) + assert new_running_mean is not None, "new_running_mean should not be None" + assert new_running_var is not None, "new_running_var should not be None" + return output, save_mean, save_rstd, new_running_mean, new_running_var + + +@register_decomposition(aten._fused_dropout) +@out_wrapper("out0", "out1") +@pw_cast_for_opmath +def _fused_dropout_decomposition(input, p, generator=None): + assert generator is None + mask = (torch.rand_like(input) < p).to(dtype=torch.uint8) + res = mask.type_as(input) * input * 
(1.0 / p) + return (res, mask) + + +def device_hint(tensor): + if isinstance(tensor, torch._subclasses.FakeTensor): + return tensor.fake_device + else: + return None + + +def wrap_output_with_input_device_(x, common_device): + # wrap meta tensor + if common_device is not None and x.device.type == "meta": + from torch._subclasses.fake_tensor import FakeTensorMode + + fake_mode = FakeTensorMode() + fake_mode.in_kernel_invocation = True + converter = fake_mode.fake_tensor_converter + return converter.from_meta_and_device(fake_mode, x, common_device) + + return x + + +@register_decomposition(aten._to_copy) +@out_wrapper() +def _to_copy( + x: Tensor, + *, + dtype: Optional[torch.dtype] = None, + layout=None, + device: Optional[torch.device] = None, + pin_memory: bool = False, + non_blocking: bool = False, + memory_format: Optional[torch.memory_format] = None, +): + assert not layout or layout == torch.strided, "TODO" + assert not pin_memory, "TODO" + if device is None and dtype is None and memory_format is None: + return x.clone() + dtype_converted = False + common_device = device_hint(x) + if device is not None and device != x.device: + # avoid conversions on cpu + if dtype is not None and device.type == "cpu": + x = torch._prims.convert_element_type(x, dtype) + dtype_converted = True + x = torch._prims.device_put(x, device) + if dtype is not None and not dtype_converted: + x = torch._prims.convert_element_type(x, dtype) + dtype_converted = True + # In case of dtype promotion, faketensor converted into tensor. + # Need to convert into faketensor if input was a faketensor. + if dtype_converted: + x = wrap_output_with_input_device_(x, common_device) + if memory_format is not None: # no ref/prim for memory format + return torch.clone(x, memory_format=memory_format) + return x + + +# Questionable decompositions +# This is only valid if we're running the graph without autograd, such as if the backward pass has been traced. +# Note that this decomposition causes issues with in-place ops +@register_decomposition([aten.detach, aten.lift, aten.lift_fresh]) +@out_wrapper() +def nop_decomposition(x): + return aten.alias(x) + + +# Also register to the Autograd dispatch key, so this decomp can run above autograd. +# native_batch_norm needs to decompose into other ops before autograd. 
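+# Illustrative sketch (hypothetical usage, not exercised in this file; assumes
+# example tensors x, w, b, rm, rv of suitable shapes): tracing through this
+# decomposition rewrites cudnn batch norm in terms of aten.native_batch_norm,
+# which is what the comment above relies on.
+#
+#   from torch.fx.experimental.proxy_tensor import make_fx
+#   from torch._decomp import get_decompositions
+#   table = get_decompositions([torch.ops.aten.cudnn_batch_norm])
+#   def f(x, w, b, rm, rv):
+#       return torch.ops.aten.cudnn_batch_norm(x, w, b, rm, rv, True, 0.1, 1e-5)
+#   gm = make_fx(f, decomposition_table=table)(x, w, b, rm, rv)
+#   # the traced graph now calls aten.native_batch_norm rather than
+#   # aten.cudnn_batch_norm (and may decompose further if more entries are added)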
+@aten.cudnn_batch_norm.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.cudnn_batch_norm) +@out_wrapper("out0", "out1", "out2", "out3") +def cudnn_batch_norm( + input: Tensor, + weight: Tensor, + bias: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + training: bool, + exponential_average_factor: float, + epsilon: float, +): + a, b, c = aten.native_batch_norm( + input, + weight, + bias, + running_mean, + running_var, + training, + exponential_average_factor, + epsilon, + ) + # Cudnn return running mean and variance when training is True + if training: + return (a, b, c, input.new_zeros((0,), dtype=torch.uint8)) + return ( + a, + weight.new_zeros((0,)), + weight.new_zeros((0,)), + input.new_zeros((0,), dtype=torch.uint8), + ) + + +def _broadcast_batch_norm_backward(x, broadcast_mask): + for axis, mask in enumerate(broadcast_mask): + if mask == 1 and not (axis < x.ndim and x.shape[axis] == broadcast_mask[axis]): + x = x.unsqueeze(axis) + return x + + +@register_decomposition(aten.native_batch_norm_backward.default) +def native_batch_norm_backward( + grad_out: Tensor, + input: Tensor, + weight: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + save_mean: Optional[Tensor], + save_invstd: Optional[Tensor], + train: bool, + eps: float, + output_mask: List[bool], +) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: + input_dtype = input.dtype + if weight is not None: + weight_dtype = weight.dtype + else: + weight_dtype = input_dtype + computation_dtype = utils.get_computation_dtype(input.dtype) + ( + grad_out_cast, + input_cast, + weight_cast, + running_mean_cast, + running_var_cast, + save_mean_cast, + save_invstd_cast, + ) = ( + x.to(computation_dtype) if x is not None else x + for x in ( + grad_out, + input, + weight, + running_mean, + running_var, + save_mean, + save_invstd, + ) + ) + input_shape = input.shape + input_rank = input.dim() + assert input_rank >= 2, "rank of the input must be at least 2" + + axis = 1 + num_features = prod(list(input_shape)) / input_shape[axis] + mean = save_mean_cast + invstd = save_invstd_cast + if train: + assert save_mean_cast is not None and save_invstd_cast is not None + else: + assert running_mean_cast is not None and running_var_cast is not None + mean = running_mean_cast + invstd = torch.rsqrt(running_var_cast + eps) + + broadcast_mask: List[int] = [1] * input_rank + broadcast_mask[axis] = input_shape[axis] + + reduction_axes: List[int] = [] + for i in range(input_rank): + if i != axis: + reduction_axes.append(i) + + mean = _broadcast_batch_norm_backward(mean, broadcast_mask) # type: ignore[arg-type] + norm = 1.0 / num_features + grad_output_sum = torch.sum(grad_out_cast, reduction_axes) # type: ignore[arg-type] + dot_p = torch.sum(grad_out_cast * (input_cast - mean), reduction_axes) # type: ignore[operator] + + grad_mean = _broadcast_batch_norm_backward(grad_output_sum * norm, broadcast_mask) + proj_scale = _broadcast_batch_norm_backward(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask) # type: ignore[operator] + + if weight_cast is None: + grad_scale = _broadcast_batch_norm_backward(invstd, broadcast_mask) * 1.0 # type: ignore[arg-type] + else: + grad_scale = _broadcast_batch_norm_backward( + invstd * weight_cast, broadcast_mask + ) + + if train: + proj = (input_cast - mean) * proj_scale # type: ignore[operator] + grad_input = ((grad_out_cast - proj) - grad_mean) * grad_scale + else: + grad_input = grad_out_cast * grad_scale + + if output_mask[1]: + 
grad_weight = dot_p * invstd + else: + grad_weight = None # "None" doesn't work with vjp, should use zeros for vjp + + if output_mask[2]: + grad_bias = grad_output_sum + else: + grad_bias = None # "None" doesn't work with vjp, should use zeros for vjp + + return ( + grad_input.to(input_dtype), + _maybe_cast(grad_weight, weight_dtype), + _maybe_cast(grad_bias, weight_dtype), + ) + + +# out_wrapper currently does not allow optional outputs +@register_decomposition(aten.native_batch_norm_backward.out) +def native_batch_norm_backward_out( + grad_out: Tensor, + input: Tensor, + weight: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + save_mean: Optional[Tensor], + save_invstd: Optional[Tensor], + train: bool, + eps: float, + output_mask: List[bool], + *, + out0: torch.Tensor, + out1: torch.Tensor, + out2: torch.Tensor, +) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: + result = native_batch_norm_backward( + grad_out, + input, + weight, + running_mean, + running_var, + save_mean, + save_invstd, + train, + eps, + output_mask, + ) + grad_input = (out0, out1, out2) + for i, r in enumerate(result): + if r is not None: + _maybe_resize_out(grad_input[i], r.shape) + _safe_copy_out(copy_from=r, copy_to=grad_input[i], exact_dtype=True) + + return grad_input + + +@register_decomposition(aten.cudnn_batch_norm_backward) +@out_wrapper("out0", "out1", "out2") +def cudnn_batch_norm_backward( + input: Tensor, + grad_output: Tensor, + weight: Tensor, + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + save_mean: Optional[Tensor], + save_var: Optional[Tensor], + epsilon: float, + reserveSpace: Tensor, +): + return aten.native_batch_norm_backward( + grad_output, + input, + weight, + running_mean, + running_var, + save_mean, + save_var, + True, + epsilon, + [True, True, True], + ) + + +@register_decomposition(aten._adaptive_avg_pool2d) +@out_wrapper() +@pw_cast_for_opmath +def adaptive_avg_pool2d(input: Tensor, output_size: Tuple[int, int]): + # Preconditions + device = input.device + shape = input.shape + ndim = len(shape) + torch._check( + ndim in (3, 4), + lambda: f"adaptive_avg_pool2d(): Expected 3D or 4D tensor, but got {ndim}", + ) + for d in input.shape[-2:]: + torch._check( + d != 0, + lambda: "adaptive_avg_pool2d(): Expected input to have non-zero size for " + f"non-batch dimensions, but input has shape {tuple(shape)}.", + ) + + # Optimisation (we should also do this in the kernel implementation) + if shape[-2] % output_size[-2] == 0 and shape[-1] % output_size[-1] == 0: + stride = tuple(i // o for i, o in zip(shape[-2:], output_size)) + kernel = tuple( + i - (o - 1) * s for i, o, s in zip(shape[-2:], output_size, stride) + ) + return torch.nn.functional.avg_pool2d(input, kernel, stride) + + def start_index(a, b, c): + return torch.div(a * c, b, rounding_mode="trunc") + + def end_index(a, b, c): + return torch.div((a + 1) * c + b - 1, b, rounding_mode="trunc") + + def compute_idx(in_size, out_size): + orange = torch.arange(out_size, device=device, dtype=torch.int64) + i0 = start_index(orange, out_size, in_size) + # Let length = end_index - start_index, i.e. 
the length of the pooling kernels + # length.max() can be computed analytically as follows: + maxlength = in_size // out_size + 1 + in_size_mod = in_size % out_size + # adaptive = True iff there are kernels with different lengths + adaptive = not (in_size_mod == 0 or out_size % in_size_mod == 0) + if adaptive: + maxlength += 1 + elif in_size_mod == 0: + maxlength -= 1 + + range_max = torch.arange(maxlength, device=device, dtype=torch.int64) + idx = i0.unsqueeze(-1) + range_max + if adaptive: + # Need to clamp to avoid accessing out-of-bounds memory + # TODO make minimum accept scalars + maxval = torch.scalar_tensor( + in_size - 1, dtype=idx.dtype, device=idx.device + ) + idx = torch.minimum(idx, maxval) + + # Compute the length + i1 = end_index(orange, out_size, in_size) + length = i1 - i0 + else: + length = maxlength + return idx, length, range_max, adaptive + + # length is not None if it's constant, otherwise we'll need to compute it + idxh, length_h, range_max_h, adaptive_h = compute_idx(shape[-2], output_size[-2]) + idxw, length_w, range_max_w, adaptive_w = compute_idx(shape[-1], output_size[-1]) + + vals = input[..., _unsqueeze_to_dim(idxh, 4), idxw] + # Shortcut for the simpler case + if not adaptive_h and not adaptive_w: + return torch.mean(vals, dim=(-3, -1)) + + def maybe_mask(vals, length, range_max, adaptive, dim): + if isinstance(length, IntLike): + return vals, length + else: + # zero-out the things we didn't really want to select + assert dim < 0 + # hack + mask = range_max >= length.unsqueeze(-1) + if dim == -2: + mask = _unsqueeze_to_dim(mask, 4) + vals = torch.masked_fill(vals, mask, 0.0) + # Compute the length of each window + length = _unsqueeze_to_dim(length, -dim) + return vals, length + + vals, length_h = maybe_mask( + vals, length_h, range_max_h, adaptive=adaptive_h, dim=-2 + ) + vals, length_w = maybe_mask( + vals, length_w, range_max_w, adaptive=adaptive_w, dim=-1 + ) + + # We unroll the sum as we assume that the kernels are going to be small + ret = None + for i, j in product(range(vals.shape[-3]), range(vals.shape[-1])): + if ret is None: + ret = vals[..., i, :, j] + else: + ret = ret + vals[..., i, :, j] + return ret / (length_h * length_w) + + +@register_decomposition(aten.index_add_) +def index_add_( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + alpha: NumberType = 1, +): + return _index_add(x, dim, index, tensor, inplace=True, alpha=alpha) + + +@register_decomposition(aten.index_add) +@out_wrapper() +def index_add( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + alpha: NumberType = 1, +): + return _index_add(x, dim, index, tensor, inplace=False, alpha=alpha) + + +def _index_add( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + inplace: bool, + alpha: NumberType = 1, +): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + index_size = index.size(0) if index.ndim == 1 else 1 + tensor_size = tensor.size(dim) if tensor.ndim > 0 else 1 + torch._check( + tensor_size == index_size, + lambda: f"Number of indices ({index_size}) should be equal to tensor.size(dim) ({tensor_size}), for {dim=}", + ) + if alpha != 1: + python_type = utils.dtype_to_type(x.dtype) + torch._check( + python_type == bool + or utils.is_weakly_lesser_type(type(alpha), python_type), + lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!", + ) + tensor = tensor * 
alpha + # Treat scalars as elements of \R^1 + zero_dim = x.ndim == 0 + x1 = x.unsqueeze(0) if zero_dim else x + idx = (None,) * dim + (index,) + index_put = aten.index_put_ if inplace else aten.index_put + out = index_put(x1, idx, tensor, accumulate=True) + if inplace: + return x + else: + return out.squeeze(0) if zero_dim else out.contiguous() + + +@register_decomposition(aten.index_copy_) +def index_copy_(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + return _index_copy(x, dim, index, tensor, inplace=True) + + +@register_decomposition(aten.index_copy) +@out_wrapper() +def index_copy(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + return _index_copy(x, dim, index, tensor, inplace=False) + + +def _index_copy( + x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike, *, inplace: bool +): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + # Treat scalars as elements of \R^1 + zero_dim = x.ndim == 0 + x1 = x.unsqueeze(0) if zero_dim else x + idx = (None,) * dim + (index,) + index_put = aten.index_put_ if inplace else aten.index_put + out = index_put(x1, idx, tensor) + if inplace: + return x + else: + return out.squeeze(0) if zero_dim else out.contiguous() + + +# nb: Should use acc_t, not op_math +@register_decomposition(aten.log_sigmoid_forward) +@out_wrapper("output", "buffer") +@pw_cast_for_opmath +def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]: + min = torch.minimum(self.new_zeros(()), self) + z = torch.exp(-torch.abs(self)) + if self.is_cuda: + buffer = self.new_zeros((0,)) + else: + buffer = z + return min - torch.log1p(z), buffer + + +@register_decomposition(aten.uniform) +@out_wrapper() +def uniform( + x: Tensor, + low: Union[bool, int, float] = 0.0, + high: Union[bool, int, float] = 1.0, + generator: Optional[torch.Generator] = None, +): + return prims._uniform_helper( + x.shape, + low=sym_float(low), + high=sym_float(high), + dtype=x.dtype, + device=x.device, + generator=generator, + ) + + +@register_decomposition(aten.uniform_) +def uniform_(self, low=0, high=1, generator=None): + return self.copy_(uniform(self, low, high, generator)) + + +# aten/src/ATen/native/UpSample.cpp compute_output_size +def upsample_compute_output_size(input_size, output_size, scale_factors): + spatial_dimensions = len(input_size) - 2 + if output_size is not None: + torch._check( + scale_factors is None, + lambda: "Must specify exactly one of output_size and scale_factors", + ) + torch._check(len(output_size) == spatial_dimensions, lambda: "") + return output_size + if scale_factors is not None: + # NB: this isn't necessary lol + torch._check( + output_size is None, + lambda: "Must specify exactly one of output_size and scale_factors", + ) + torch._check(len(scale_factors) == spatial_dimensions, lambda: "") + output_size = [] + for i, s in enumerate(scale_factors): + if int(s) == s: + output_size.append(input_size[i + 2] * int(s)) + else: + output_size.append(sym_int(input_size[i + 2] * s)) + return output_size + torch._check( + False, lambda: "Must specify exactly one of output_size and scale_factors" + ) + + +def get_scale_value(scales, idx): + if scales is None: + return None + return scales[idx] + + +@register_decomposition(aten.upsample_nearest1d.vec) +@aten.upsample_nearest1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.upsample_nearest1d.vec.py_impl(DispatchKey.Autograd) +def upsample_nearest1d_vec(input, output_size, 
scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale = get_scale_value(scale_factors, 0) + + return aten.upsample_nearest1d.default(input, osize, scale) + + +@register_decomposition(aten._upsample_nearest_exact1d.vec) +@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.Autograd) +def _upsample_nearest_exact1d_vec(input, output_size, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale = get_scale_value(scale_factors, 0) + + return aten._upsample_nearest_exact1d.default(input, osize, scale) + + +@register_decomposition(aten.upsample_nearest2d.vec) +@aten.upsample_nearest2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.upsample_nearest2d.vec.py_impl(DispatchKey.Autograd) +def upsample_nearest2d_vec(input, output_size, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale_h = get_scale_value(scale_factors, 0) + scale_w = get_scale_value(scale_factors, 1) + + return aten.upsample_nearest2d.default(input, osize, scale_h, scale_w) + + +@register_decomposition(aten._upsample_nearest_exact2d.vec) +@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.Autograd) +def _upsample_nearest_exact2d_vec(input, output_size, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale_h = get_scale_value(scale_factors, 0) + scale_w = get_scale_value(scale_factors, 1) + + return aten._upsample_nearest_exact2d.default(input, osize, scale_h, scale_w) + + +@register_decomposition(aten.upsample_nearest3d.vec) +@aten.upsample_nearest3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.upsample_nearest3d.vec.py_impl(DispatchKey.Autograd) +def upsample_nearest3d_vec(input, output_size, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale_d = get_scale_value(scale_factors, 0) + scale_h = get_scale_value(scale_factors, 1) + scale_w = get_scale_value(scale_factors, 2) + + return aten.upsample_nearest3d.default(input, osize, scale_d, scale_h, scale_w) + + +@register_decomposition(aten._upsample_nearest_exact3d.vec) +@aten._upsample_nearest_exact3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten._upsample_nearest_exact3d.vec.py_impl(DispatchKey.Autograd) +def _upsample_nearest_exact3d_vec(input, output_size, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale_d = get_scale_value(scale_factors, 0) + scale_h = get_scale_value(scale_factors, 1) + scale_w = get_scale_value(scale_factors, 2) + + return aten._upsample_nearest_exact3d.default( + input, osize, scale_d, scale_h, scale_w + ) + + +def _compute_upsample_nearest_indices(input, output_size, scales, exact=False): + # For each dim in output_size, compute the set of input indices used + # to produce the upsampled output. 
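+ # Worked example of the formulas below: with isize=3, osize=5 and
+ # scales[d] is None, scale = 3 / 5 = 0.6. exact=False (offset=0.0) gives
+ # input_indices = floor([0, 1, 2, 3, 4] * 0.6) = [0, 0, 1, 1, 2], while
+ # exact=True (offset=0.5) gives floor([0.3, 0.9, 1.5, 2.1, 2.7]) = [0, 0, 1, 2, 2].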
+ indices = [] + num_spatial_dims = len(output_size) + offset = 0.5 if exact else 0.0 + + for d in range(num_spatial_dims): + # Math matches aten/src/ATen/native/cpu/UpSampleKernel.cpp + # + # Indices are computed as following: + # scale = isize / osize + # Case: exact=False + # input_index = floor(output_index * scale) + # Same as OpenCV INTER_NEAREST + # + # Case: exact=False + # index_f32 = (output_index + 0.5) * scale - 0.5 + # input_index = round(index_f32) + # Same as Pillow and Scikit-Image/Scipy ndi.zoom + osize = output_size[d] + isize = input.shape[-num_spatial_dims + d] + scale = isize / (isize * scales[d]) if scales[d] is not None else isize / osize + + output_indices = torch.arange(osize, dtype=torch.float32, device=input.device) + input_indices = ((output_indices + offset) * scale).to(torch.int64) + for _ in range(num_spatial_dims - 1 - d): + input_indices = input_indices.unsqueeze(-1) + indices.append(input_indices) + return tuple(indices) + + +@register_decomposition(aten.upsample_nearest1d.default) +@aten.upsample_nearest1d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def upsample_nearest1d( + input: Tensor, + output_size: List[int], + scales: Optional[float] = None, +) -> Tensor: + (l_indices,) = _compute_upsample_nearest_indices(input, output_size, (scales,)) + return aten._unsafe_index(input, (None, None, l_indices)) + + +@register_decomposition(aten._upsample_nearest_exact1d.default) +@aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def _upsample_nearest_exact1d( + input: Tensor, + output_size: List[int], + scales: Optional[float] = None, +) -> Tensor: + (l_indices,) = _compute_upsample_nearest_indices( + input, output_size, (scales,), exact=True + ) + return aten._unsafe_index(input, (None, None, l_indices)) + + +def _upsample_nearest2d_common(input, h_indices, w_indices): + result = aten._unsafe_index(input, (None, None, h_indices, w_indices)) + + # convert output to correct memory format, if necessary + memory_format = utils.suggest_memory_format(input) + + # following "heuristic: only use channels_last path when it's faster than the contiguous path" + _, n_channels, _, _ = input.shape + if input.device.type == "cuda" and n_channels < 4: + memory_format = torch.contiguous_format + + result = result.contiguous(memory_format=memory_format) + return result + + +@register_decomposition(aten.upsample_nearest2d.default) +@aten.upsample_nearest2d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def upsample_nearest2d( + input: Tensor, + output_size: List[int], + scales_h: Optional[float] = None, + scales_w: Optional[float] = None, +) -> Tensor: + h_indices, w_indices = _compute_upsample_nearest_indices( + input, output_size, (scales_h, scales_w) + ) + return _upsample_nearest2d_common(input, h_indices, w_indices) + + +@register_decomposition(aten._upsample_nearest_exact2d.default) +@aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def _upsample_nearest_exact2d( + input: Tensor, + output_size: List[int], + scales_h: Optional[float] = None, + scales_w: Optional[float] = None, +) -> Tensor: + h_indices, w_indices = _compute_upsample_nearest_indices( + input, output_size, (scales_h, scales_w), exact=True + ) + return _upsample_nearest2d_common(input, h_indices, w_indices) + + +@register_decomposition(aten.upsample_nearest3d.default) +@aten.upsample_nearest3d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def upsample_nearest3d( + input: Tensor, + 
output_size: List[int], + scales_d: Optional[float] = None, + scales_h: Optional[float] = None, + scales_w: Optional[float] = None, +) -> Tensor: + d_indices, h_indices, w_indices = _compute_upsample_nearest_indices( + input, output_size, (scales_d, scales_h, scales_w) + ) + result = aten._unsafe_index(input, (None, None, d_indices, h_indices, w_indices)) + + return result + + +@register_decomposition(aten._upsample_nearest_exact3d.default) +@aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def _upsample_nearest_exact3d( + input: Tensor, + output_size: List[int], + scales_d: Optional[float] = None, + scales_h: Optional[float] = None, + scales_w: Optional[float] = None, +) -> Tensor: + d_indices, h_indices, w_indices = _compute_upsample_nearest_indices( + input, output_size, (scales_d, scales_h, scales_w), exact=True + ) + result = aten._unsafe_index(input, (None, None, d_indices, h_indices, w_indices)) + + return result + + +def gather_params(params, has_biases, has_projections): + if has_biases and has_projections: + group_size = 5 + elif has_biases: + group_size = 4 + elif has_projections: + group_size = 3 + else: + group_size = 2 + + assert len(params) % group_size == 0, len(params) + return [ + tuple(params[i : i + group_size]) for i in range(0, len(params), group_size) + ] + + +def params_hiddens(params, hiddens, i, bidirectional): + if bidirectional: + cur_params, cur_hidden = params[2 * i], hiddens[2 * i] + bidir_params, bidir_hidden = params[2 * i + 1], hiddens[2 * i + 1] + else: + cur_params, cur_hidden = params[i], hiddens[i] + bidir_params, bidir_hidden = None, None + + return cur_params, cur_hidden, bidir_params, bidir_hidden + + +def update_hidden_for_packed(cur_hidden, last_batch_size, batch_size, hiddens): + assert last_batch_size > batch_size + hiddens.append(cur_hidden.narrow(0, batch_size, last_batch_size - batch_size)) + return cur_hidden.narrow(0, 0, batch_size) + + +def update_hidden_for_packed_reverse( + cur_hidden, last_batch_size, batch_size, inp_hidden +): + if last_batch_size == batch_size: + return cur_hidden + assert last_batch_size < batch_size + return torch.concat( + ( + cur_hidden, + inp_hidden.narrow(0, last_batch_size, batch_size - last_batch_size), + ) + ) + + +def one_layer_rnn_data( + inp, hidden, params, has_biases, hidden_fn, batch_sizes, reverse=False +): + ih_weight = params[0] + hh_weight = params[1] + ih_bias = params[2] if has_biases else None + hh_bias = params[3] if has_biases else None + + step_output = [] + hiddens: List[torch.Tensor] = [] + + last_batch_size = batch_sizes[-1] if reverse else batch_sizes[0] + cur_hidden = hidden.narrow(0, 0, last_batch_size) + split_inp = torch.split(inp, list(batch_sizes)) + if reverse: + split_inp = split_inp[::-1] + for inp in split_inp: + i = inp.shape[0] + + if last_batch_size == i: + pass # don't update cur_hidden + # this will only happen when reverse=False, since batch sizes are sorted largest -> smallest + elif reverse: + cur_hidden = update_hidden_for_packed_reverse( + cur_hidden, last_batch_size, i, hidden + ) + else: + cur_hidden = update_hidden_for_packed( + cur_hidden, last_batch_size, i, hiddens + ) + + cur_hidden = hidden_fn(inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias) + last_batch_size = i + step_output.append(cur_hidden) + + if reverse: + step_output.reverse() + else: + hiddens.append(cur_hidden) + hiddens.reverse() + + out = torch.cat(step_output, 0) + hidden_out = torch.cat(hiddens, 0) if not reverse else cur_hidden + return out, 
hidden_out + + +def rnn_cell(nonlinearity): + def inner(i, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias): + return nonlinearity(F.linear(cur_hidden, hh_weight, hh_bias) + i) + + return inner + + +def rnn_cell_data(nonlinearity): + def inner(i, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias): + i = F.linear(i, ih_weight, ih_bias) + return nonlinearity(F.linear(cur_hidden, hh_weight, hh_bias) + i) + + return inner + + +def one_layer_rnn(inp, hidden, params, has_biases, hidden_fn, reverse=False): + ih_weight = params[0] + hh_weight = params[1] + ih_bias = params[2] if has_biases else None + hh_bias = params[3] if has_biases else None + + precomputed_input = F.linear(inp, ih_weight, ih_bias) + precomputed_input = precomputed_input.flip(0) if reverse else precomputed_input + cur_hidden = hidden.unsqueeze(0) + step_output = [] + for i in precomputed_input: + cur_hidden = hidden_fn(i, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias) + step_output.append(cur_hidden) + + if reverse: + step_output.reverse() + + out = torch.cat(step_output, 0) + + return out, cur_hidden.squeeze(0) + + +def mkldnn_one_layer_lstm(inp, hidden, params, has_biases, reverse=False): + w0 = params[0] + w1 = params[1] + if has_biases: + w2 = params[2] + w3 = params[3] + else: + w2 = torch.zeros(w0.size()) + w3 = torch.zeros(w1.size()) + + hx = hidden[0].unsqueeze(0) + cx = hidden[1].unsqueeze(0) + + batch_sizes: List[int] = [] + mode = 2 # third_party/ideep/include/ideep/abstract_types.hpp: ideep::rnn_kind::LSTM = 2 + hidden_size = hx.size(2) + num_layers = 1 + + # _rnn_helper already handles bidirectional and batch_first so we hard-code them to False here + bidirectional = False + batch_first = False + + train = False + # If batch_first, inp has been permuted in _rnn_helper. Convert to contiguous here. 
+ # Same as aten/src/ATen/native/mkldnn/RNN.cpp: mkldnn_rnn: input = input.contiguous(); + inp = inp.contiguous() + hx = hx.contiguous() + cx = cx.contiguous() + outputs = torch.ops.aten.mkldnn_rnn_layer.default( + inp, + w0, + w1, + w2, + w3, + hx, + cx, + reverse, + batch_sizes, + mode, + hidden_size, + num_layers, + has_biases, + bidirectional, + batch_first, + train, + ) + y, hy, cy = outputs[0], outputs[1], outputs[2] + return y, (hy.squeeze(0), cy.squeeze(0)) + + +def _rnn_helper( + input, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + layer_fn, +): + input = input.transpose(0, 1) if batch_first else input + final_hiddens = [] + + for i in range(num_layers): + cur_params, cur_hidden, bidir_params, bidir_hidden = params_hiddens( + params, hidden, i, bidirectional + ) + dropout = dropout if (train and num_layers < i - 1) else 0.0 + fwd_inp, fwd_hidden = layer_fn(input, cur_hidden, cur_params, has_biases) + final_hiddens.append(fwd_hidden) + + if bidirectional: + bwd_inp, bwd_hidden = layer_fn( + input, bidir_hidden, bidir_params, has_biases, reverse=True + ) + final_hiddens.append(bwd_hidden) + + if bidirectional: + input = torch.cat([fwd_inp, bwd_inp], fwd_inp.dim() - 1) + else: + input = fwd_inp + + if dropout != 0 and train and i < num_layers - 1: + input = torch.dropout(input, dropout, train=True) + + input = input.transpose(0, 1) if batch_first else input + return input, final_hiddens + + +@register_decomposition(aten.rnn_tanh.input) +@aten.rnn_tanh.input.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.rnn_tanh.input.py_impl(DispatchKey.Autograd) +def rnn_tanh_input( + input, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, +): + hidden = hx.unbind(0) + params = gather_params(params, has_biases, False) + out, final_hiddens = _rnn_helper( + input, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + partial(one_layer_rnn, hidden_fn=rnn_cell(torch.tanh)), + ) + return out, torch.stack(final_hiddens, 0) + + +@register_decomposition(aten.rnn_relu.input) +@aten.rnn_relu.input.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.rnn_relu.input.py_impl(DispatchKey.Autograd) +def rnn_relu_input( + input, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, +): + hidden = hx.unbind(0) + params = gather_params(params, has_biases, False) + out, final_hiddens = _rnn_helper( + input, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + partial(one_layer_rnn, hidden_fn=rnn_cell(torch.relu)), + ) + return out, torch.stack(final_hiddens, 0) + + +@register_decomposition(aten.rnn_relu.data) +@aten.rnn_relu.data.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.rnn_relu.data.py_impl(DispatchKey.Autograd) +def rnn_relu_data( + data, + batch_sizes, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, +): + hidden = hx.unbind(0) + params = gather_params(params, has_biases, False) + out, final_hiddens = _rnn_helper( + data, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + False, + partial( + one_layer_rnn_data, + batch_sizes=batch_sizes, + hidden_fn=rnn_cell_data(torch.relu), + ), + ) + return out, torch.stack(final_hiddens, 0) + + +@register_decomposition(aten.rnn_tanh.data) +@aten.rnn_tanh.data.py_impl(DispatchKey.CompositeImplicitAutograd) 
+@aten.rnn_tanh.data.py_impl(DispatchKey.Autograd) +def rnn_tanh_data( + data, + batch_sizes, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, +): + hidden = hx.unbind(0) + params = gather_params(params, has_biases, False) + out, final_hiddens = _rnn_helper( + data, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + False, + partial( + one_layer_rnn_data, + batch_sizes=batch_sizes, + hidden_fn=rnn_cell_data(torch.tanh), + ), + ) + return out, torch.stack(final_hiddens, 0) + + +def lstm_cell(inp, hx, cx, hh_weight, hh_bias, hr_weight, chunk_dim): + gates = F.linear(hx, hh_weight, hh_bias) + inp + chunked_gates = gates.chunk(4, chunk_dim) + in_gate = chunked_gates[0].sigmoid() + forget_gate = chunked_gates[1].sigmoid() + cell_gate = chunked_gates[2].tanh() + out_gate = chunked_gates[3].sigmoid() + cy = forget_gate * cx + (in_gate * cell_gate) + hy = out_gate * cy.tanh() + hy = hy if hr_weight is None else F.linear(hy, hr_weight, None) + + return hy, cy + + +def one_layer_lstm(inp, hidden, params, has_biases, reverse=False): + ih_weight = params[0] + hh_weight = params[1] + ih_bias = params[2] if has_biases else None + hh_bias = params[3] if has_biases else None + hr_weight = ( + params[4] if len(params) == 5 else params[2] if len(params) == 3 else None + ) + + hx = hidden[0].unsqueeze(0) + cx = hidden[1].unsqueeze(0) + + precomputed_input = F.linear(inp, ih_weight, ih_bias) + precomputed_input = precomputed_input.flip(0) if reverse else precomputed_input + step_output = [] + for inp in precomputed_input: + hx, cx = lstm_cell(inp, hx, cx, hh_weight, hh_bias, hr_weight, chunk_dim=2) + step_output.append(hx) + + if reverse: + step_output.reverse() + + out = torch.cat(step_output, 0) + + return out, (hx.squeeze(1), cx.squeeze(1)) + + +def one_layer_lstm_data(inp, hidden, params, has_biases, batch_sizes, reverse=False): + ih_weight = params[0] + hh_weight = params[1] + ih_bias = params[2] if has_biases else None + hh_bias = params[3] if has_biases else None + hr_weight = ( + params[4] if len(params) == 5 else params[2] if len(params) == 3 else None + ) + + step_output = [] + hiddens = [] + + last_batch_size = batch_sizes[-1] if reverse else batch_sizes[0] + split_inp = torch.split(inp, list(batch_sizes)) + if reverse: + split_inp = split_inp[::-1] + + orig_hx = hidden[0] + orig_cx = hidden[1] + hx, cx = orig_hx.narrow(0, 0, last_batch_size), orig_cx.narrow( + 0, 0, last_batch_size + ) + + for inp in split_inp: + i = inp.shape[0] + inp = F.linear(inp, ih_weight, ih_bias) + + # this will only happen when reverse=False, since batch sizes are sorted largest -> smallest + if i < last_batch_size: + hiddens.append( + ( + hx.narrow(0, i, last_batch_size - i), + cx.narrow(0, i, last_batch_size - i), + ) + ) + hx, cx = hx.narrow(0, 0, i), cx.narrow(0, 0, i) + + # this will only happen when reverse=True + if i > last_batch_size: + hx = torch.concat( + (hx, orig_hx.narrow(0, last_batch_size, i - last_batch_size)), 0 + ) + cx = torch.concat( + (cx, orig_cx.narrow(0, last_batch_size, i - last_batch_size)), 0 + ) + + hx, cx = lstm_cell(inp, hx, cx, hh_weight, hh_bias, hr_weight, chunk_dim=1) + last_batch_size = i + step_output.append(hx) + + if reverse: + step_output.reverse() + hidden_out = (hx, cx) + else: + hiddens.append((hx, cx)) + hiddens.reverse() + hidden0, hidden1 = zip(*hiddens) + hidden_out = torch.cat(hidden0, 0), torch.cat(hidden1, 0) + + out = torch.cat(step_output, 0) + return out, hidden_out + + +def 
select_one_layer_lstm_function(input, hx, params): + r"""Check whether we could use decompose lstm with mkldnn_rnn_layer. + All the below conditions need to be met: + * ``torch._C._has_mkldnn`` returns ``True``. + * All the input args are on CPU. + * The dtypes of args are either torch.float or torch.bfloat16. + * Inference. + * ``has_projections`` returns ``False``. + + Args: + * input: the input sequence to LSTM + * hx: a tuple of the input hidden state and cell state ``(h_0, c_0)`` to LSTM + * params: the weight and bias tensors of LSTM + """ + + def use_mkldnn(input, hx, params): + if not torch._C._has_mkldnn: + return False + + tensors = [input] + list(hx) + list(chain.from_iterable(params)) + devices = {t.device for t in tensors} + if len(devices) != 1: + return False + + device = devices.pop() + if device != torch.device("cpu"): + return False + # With autocast, possible to have mixed dtype here + dtypes = {t.dtype for t in tensors} + for dtype in dtypes: + if dtype not in [torch.float, torch.bfloat16]: + return False + + if input.requires_grad: + return False + + has_projections = hx[0].size(2) != hx[1].size(2) + if has_projections: + return False + + return True + + # mkldnn_one_layer_lstm does not depend on seq_len while one_layer_lstm + # will expand over the seq_len dim + if use_mkldnn(input, hx, params): + return mkldnn_one_layer_lstm + else: + return one_layer_lstm + + +@register_decomposition(aten.lstm.input) +@aten.lstm.input.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.lstm.input.py_impl(DispatchKey.Autograd) +def lstm_impl( + input, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, +): + assert len(hx) == 2, "lstm expects two hidden states" + params = gather_params(params, has_biases, hx[0].size(2) != hx[1].size(2)) + hidden = list(zip(hx[0], hx[1])) + layer_fn = select_one_layer_lstm_function(input, hx, params) + out, final_hiddens = _rnn_helper( + input, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + layer_fn, + ) + final_hiddens = list(zip(*final_hiddens)) + return out, torch.stack(final_hiddens[0], 0), torch.stack(final_hiddens[1], 0) + + +@register_decomposition(aten.lstm.data) +@aten.lstm.data.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.lstm.data.py_impl(DispatchKey.Autograd) +def lstm_data_impl( + data, + batch_sizes, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, +): + assert len(hx) == 2, "lstm expects two hidden states" + params = gather_params(params, has_biases, hx[0].size(2) != hx[1].size(2)) + hidden = list(zip(hx[0], hx[1])) + out, final_hiddens = _rnn_helper( + data, + hidden, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + False, + partial(one_layer_lstm_data, batch_sizes=batch_sizes), + ) + final_hiddens = list(zip(*final_hiddens)) + return out, torch.stack(final_hiddens[0], 0), torch.stack(final_hiddens[1], 0) + + +def gru_cell(inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias): + chunked_igates = inp.chunk(3, 1) + chunked_hgates = F.linear(cur_hidden, hh_weight, hh_bias).chunk(3, 2) + reset_gate = (chunked_hgates[0] + chunked_igates[0]).sigmoid() + input_gate = (chunked_hgates[1] + chunked_igates[1]).sigmoid() + new_gate = (chunked_igates[2] + (chunked_hgates[2] * reset_gate)).tanh() + return (cur_hidden - new_gate) * input_gate + new_gate + + +def gru_cell_data(inp, cur_hidden, ih_weight, ih_bias, hh_weight, hh_bias): + chunked_igates = F.linear(inp, ih_weight, 
ih_bias).chunk(3, 1) + chunked_hgates = F.linear(cur_hidden, hh_weight, hh_bias).chunk(3, 1) + reset_gate = (chunked_hgates[0] + chunked_igates[0]).sigmoid() + input_gate = (chunked_hgates[1] + chunked_igates[1]).sigmoid() + new_gate = (chunked_igates[2] + (chunked_hgates[2] * reset_gate)).tanh() + return (cur_hidden - new_gate) * input_gate + new_gate + + +@register_decomposition(aten.gru.data) +@aten.gru.data.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.gru.data.py_impl(DispatchKey.Autograd) +def gru_impl_data( + data, + batch_sizes, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, +): + params = gather_params(params, has_biases, False) + out, final_hiddens = _rnn_helper( + data, + hx.unbind(0), + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + False, + partial(one_layer_rnn_data, batch_sizes=batch_sizes, hidden_fn=gru_cell_data), + ) + return out, torch.stack(final_hiddens, 0) + + +@register_decomposition(aten.gru.input) +@aten.gru.input.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.gru.input.py_impl(DispatchKey.Autograd) +def gru_impl( + input, + hx, + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, +): + params = gather_params(params, has_biases, False) + out, final_hiddens = _rnn_helper( + input, + hx.unbind(0), + params, + has_biases, + num_layers, + dropout, + train, + bidirectional, + batch_first, + partial(one_layer_rnn, hidden_fn=gru_cell), + ) + return out, torch.stack(final_hiddens, 0) + + +@register_decomposition(aten._upsample_bilinear2d_aa.vec) +@aten._upsample_bilinear2d_aa.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten._upsample_bilinear2d_aa.vec.py_impl(DispatchKey.Autograd) +def upsample_bilinear2d_aa_vec(input, output_size, align_corners, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale_h = get_scale_value(scale_factors, 0) + scale_w = get_scale_value(scale_factors, 1) + return torch.ops.aten._upsample_bilinear2d_aa( + input, osize, align_corners, scale_h, scale_w + ) + + +@register_decomposition(aten.upsample_bilinear2d.vec) +@aten.upsample_bilinear2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.upsample_bilinear2d.vec.py_impl(DispatchKey.Autograd) +def upsample_bilinear2d_vec(input, output_size, align_corners, scale_factors): + osize = upsample_compute_output_size(input.size(), output_size, scale_factors) + scale_h = get_scale_value(scale_factors, 0) + scale_w = get_scale_value(scale_factors, 1) + return upsample_bilinear2d(input, osize, align_corners, scale_h, scale_w) + + +@register_decomposition(aten.upsample_bilinear2d.default) +@aten.upsample_bilinear2d.default.py_impl(DispatchKey.Autograd) +@pw_cast_for_opmath +def upsample_bilinear2d( + input: Tensor, + output_size: List[int], + align_corners: bool, + scales_h: Optional[float] = None, + scales_w: Optional[float] = None, +) -> Tensor: + # get dimensions of original image + n_batch, n_channels, in_h, in_w = input.shape + + out_h = output_size[0] + out_w = output_size[1] + + # Calculate horizontal and vertical scaling factor + # TODO: Figure out if scales_h/scales_w matters here + if out_h > 1: + if align_corners: + h_scale_factor = (in_h - 1) / (out_h - 1) + else: + h_scale_factor = 1.0 / scales_h if scales_h is not None else in_h / out_h + else: + h_scale_factor = 0.0 + + if out_w > 1: + if align_corners: + w_scale_factor = (in_w - 1) / (out_w - 1) + else: + w_scale_factor = 1.0 / scales_w if scales_w is not None else 
in_w / out_w + else: + w_scale_factor = 0.0 + + i = torch.arange(out_h, dtype=input.dtype, device=input.device) + j = torch.arange(out_w, dtype=input.dtype, device=input.device) + + if align_corners: + x = h_scale_factor * i + y = w_scale_factor * j + else: + x = (h_scale_factor * (i + 0.5) - 0.5).clamp(min=0.0) + y = (w_scale_factor * (j + 0.5) - 0.5).clamp(min=0.0) + + x_floor = x.to(torch.int64) + x_ceil = torch.ceil(x).clamp(max=in_h - 1).to(torch.int64) + y_floor = y.to(torch.int64) + y_ceil = torch.ceil(y).clamp(max=in_w - 1).to(torch.int64) + + x_view = x.unsqueeze(1) + x_floor_view = x_floor.unsqueeze(1) + x_ceil_view = x_ceil.unsqueeze(1) + + v1 = aten._unsafe_index(input, [None, None, x_floor_view, y_floor]) + v2 = aten._unsafe_index(input, [None, None, x_ceil_view, y_floor]) + v3 = aten._unsafe_index(input, [None, None, x_floor_view, y_ceil]) + v4 = aten._unsafe_index(input, [None, None, x_ceil_view, y_ceil]) + + xscale2 = x_view - x_floor_view + xscale1 = 1.0 - xscale2 + + yscale2 = y - y_floor + yscale1 = 1.0 - yscale2 + + q1 = torch.mul(v1, xscale1) + torch.mul(v2, xscale2) + q2 = torch.mul(v3, xscale1) + torch.mul(v4, xscale2) + result = torch.mul(q1, yscale1) + torch.mul(q2, yscale2) + + # convert output to correct memory format, if necessary + memory_format = utils.suggest_memory_format(input) + + # following "heuristic: only use channels_last path when it's faster than the contiguous path" + if input.device.type == "cuda" and n_channels < 16: + memory_format = torch.contiguous_format + + result = result.contiguous(memory_format=memory_format) + + return result + + +@register_decomposition(aten.replication_pad2d.default) +@pw_cast_for_opmath +def replication_pad2d(input: Tensor, padding: List[int]) -> Tensor: + pad_left = padding[0] + pad_right = padding[1] + pad_top = padding[2] + pad_bottom = padding[3] + + # If all of the padding values are non-negative, then the following tensors + # are all equal to the input. But if any padding values are negative, we + # have to remove the appropriate rows and columns from the input. + # `input_mid` has all negative padding removed from it. `input_mid_tb` has + # negative left and right padding removed from it. `input_mid_lr` has + # negative top and bottom padding removed from it. 
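+ # For example, padding = [-1, 2, 0, 1] on an (N, C, 4, 4) input first crops
+ # one column on the left (handled via input_mid / input_mid_tb below), then
+ # replicates 2 columns on the right and 1 row at the bottom, producing an
+ # (N, C, 5, 5) output.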
+ input_mid = input + input_mid_tb = input + input_mid_lr = input + + if pad_left < 0: + input_mid = input_mid[..., -pad_left:] + input_mid_tb = input_mid_tb[..., -pad_left:] + pad_left = 0 + + if pad_right < 0: + input_mid = input_mid[..., :pad_right] + input_mid_tb = input_mid_tb[..., :pad_right] + pad_right = 0 + + if pad_top < 0: + input_mid = input_mid[..., -pad_top:, :] + input_mid_lr = input_mid_lr[..., -pad_top:, :] + pad_top = 0 + + if pad_bottom < 0: + input_mid = input_mid[..., :pad_bottom, :] + input_mid_lr = input_mid_lr[..., :pad_bottom, :] + pad_bottom = 0 + + batch_dims_no_repeat = (1,) * (input.dim() - 2) + + repeat_top_left = batch_dims_no_repeat + (pad_top, pad_left) + repeat_top_middle = batch_dims_no_repeat + (pad_top, 1) + repeat_top_right = batch_dims_no_repeat + (pad_top, pad_right) + + top_rows = torch.cat( + [ + # top left + input[..., [0], :][..., [0]].repeat(repeat_top_left), + # top middle + input_mid_tb[..., [0], :].repeat(repeat_top_middle), + # top right + input[..., [0], :][..., [-1]].repeat(repeat_top_right), + ], + dim=-1, + ) + + repeat_middle_left = batch_dims_no_repeat + (1, pad_left) + repeat_middle_right = batch_dims_no_repeat + (1, pad_right) + + middle_rows = torch.cat( + [ + # middle left + input_mid_lr[..., [0]].repeat(repeat_middle_left), + # middle middle + input_mid, + # middle right + input_mid_lr[..., [-1]].repeat(repeat_middle_right), + ], + dim=-1, + ) + + repeat_bottom_left = batch_dims_no_repeat + (pad_bottom, pad_left) + repeat_bottom_middle = batch_dims_no_repeat + (pad_bottom, 1) + repeat_bottom_right = batch_dims_no_repeat + (pad_bottom, pad_right) + + bottom_rows = torch.cat( + [ + # bottom left + input[..., [-1], :][..., [0]].repeat(repeat_bottom_left), + # bottom middle + input_mid_tb[..., [-1], :].repeat(repeat_bottom_middle), + # bottom right + input[..., [-1], :][..., [-1]].repeat(repeat_bottom_right), + ], + dim=-1, + ) + + return torch.cat([top_rows, middle_rows, bottom_rows], dim=-2) + + +# We should be applying decompositions after all transformations +@register_decomposition(aten.is_same_size.default) +def is_same_size(a: Tensor, b: Tensor) -> bool: + return a.shape == b.shape + + +@register_decomposition([aten._reshape_alias, aten._unsafe_view]) +@out_wrapper() +def _reshape_alias(x, shape, *args): + return aten.view(x, shape) + + +@register_decomposition([aten._unsafe_index]) +def _index(x, indices): + return aten.index(x, indices) + + +def _nll_loss_forward( + self: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, +) -> Tuple[Tensor, Tensor]: + # self can be [N, C] or [C] + # target can be [N] or [] + + n_dims = self.dim() + channel_dim = 1 + if n_dims < 2: + channel_dim = 0 + + if weight is not None: + if n_dims > 1: + shape = [ + 1, + ] * n_dims + shape[channel_dim] = weight.shape[0] + w = weight.view(shape) + else: + w = weight + self = self * w + safe_target = torch.where(target != ignore_index, target, 0) + safe_target_ = safe_target.unsqueeze(channel_dim) + # target can be [N, 1] or [1] + + result = -torch.gather(self, channel_dim, safe_target_).squeeze(channel_dim) + + result = torch.where(target != ignore_index, result, 0) + + if reduction == Reduction.NONE.value and n_dims > 1: + total_weight = self.new_full((), 0.0) + return result, total_weight + + if weight is not None: + w = w.expand(self.shape) + wsum = torch.gather(w, channel_dim, safe_target_).squeeze(channel_dim) + wsum = torch.where(target != ignore_index, wsum, 0) + total_weight = wsum.sum() + else: + 
total_weight = (target != ignore_index).sum().to(self) + + if reduction == Reduction.SUM.value: + result = result.sum() + elif reduction == Reduction.MEAN.value: + result = result.sum() / total_weight + + return result, total_weight + + +@register_decomposition(aten.nll_loss_forward) +@out_wrapper("output", "total_weight") +def nll_loss_forward( + self: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, +) -> Tuple[Tensor, Tensor]: + assert self.dim() > 0 and self.dim() <= 2, "input tensor should be 1D or 2D" + assert ( + target.dim() <= 1 + ), "0D or 1D target tensor expected, multi-target not supported" + + no_batch_dim = self.dim() == 1 and target.dim() == 0 + assert no_batch_dim or ( + self.shape[0] == target.shape[0] + ), f"size mismatch (got input: {self.shape}, target: {target.shape})" + + n_classes = self.shape[-1] + + assert weight is None or ( + weight.dim() == 1 and weight.numel() == n_classes + ), f"weight tensor should be defined either for all {n_classes} classes or no classes but got weight tensor of shape: {weight.shape}" # noqa: B950 + + return _nll_loss_forward(self, target, weight, reduction, ignore_index) + + +@register_decomposition(aten.nll_loss2d_forward) +@out_wrapper("output", "total_weight") +def nll_loss2d_forward( + self: Tensor, + target: Tensor, + weight: Optional[Tensor], + reduction: int, + ignore_index: int, +) -> Tuple[Tensor, Tensor]: + return _nll_loss_forward(self, target, weight, reduction, ignore_index) + + +# These are adapted from aten/src/ATen/native/UpSample.h, wich is based on +# https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +def _upsample_cubic_convolution1(x: Tensor, A: float) -> Tensor: + return ((A + 2) * x - (A + 3)) * x * x + 1 + + +def _upsample_cubic_convolution2(x: Tensor, A: float) -> Tensor: + return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A + + +def _upsample_get_cubic_coefficients(t: Tensor) -> TensorSequenceType: + A = -0.75 + return ( + _upsample_cubic_convolution2(t + 1.0, A), + _upsample_cubic_convolution1(t, A), + _upsample_cubic_convolution1(1.0 - t, A), + _upsample_cubic_convolution2(2.0 - t, A), + ) + + +def _upsample_cubic_interp1d(coeffs: TensorSequenceType, ts: Tensor) -> Tensor: + coeffs2 = _upsample_get_cubic_coefficients(ts) + return _sum_tensors(c1 * c2 for (c1, c2) in zip(coeffs, coeffs2)) + + +# Need this instead of just sum() to keep mypy happy +def _sum_tensors(ts: Iterable[Tensor]) -> Tensor: + return reduce(torch.add, ts) + + +def _linspace_from_neg_one( + num_steps: int, align_corners: bool, dtype: torch.dtype, device: torch.device +): + if num_steps <= 1: + return torch.tensor(0, device=device, dtype=dtype) + + a = ((num_steps - 1) / num_steps) if not align_corners else 1 + return torch.linspace(-a, a, steps=num_steps, device=device, dtype=dtype) + + +def _make_base_grid_4d(theta: Tensor, h: int, w: int, align_corners: bool): + dtype = theta.dtype + device = theta.device + + # Using padding and summation generates a single kernel vs using torch.stack where 3 kernels generated + # corresponding to each individual tensor: grid_x, grid_y, grid_one + grid_x = _linspace_from_neg_one(w, align_corners, dtype, device).view(1, w, 1) + grid_y = _linspace_from_neg_one(h, align_corners, dtype, device).view(h, 1, 1) + grid_one = torch.ones((1, 1, 1), dtype=dtype, device=device) + + # this is just a temporary hack and we should use torch.stack here once #104480 is merged + grid_x = torch.nn.functional.pad(grid_x, pad=(0, 2), mode="constant", 
value=0) + grid_y = torch.nn.functional.pad(grid_y, pad=(1, 1), mode="constant", value=0) + grid_one = torch.nn.functional.pad(grid_one, pad=(2, 0), mode="constant", value=0) + return grid_x + grid_y + grid_one + + +def _make_base_grid_5d(theta: Tensor, d: int, h: int, w: int, align_corners: bool): + dtype = theta.dtype + device = theta.device + + grid_x = _linspace_from_neg_one(w, align_corners, dtype, device).view(1, 1, w, 1) + grid_y = _linspace_from_neg_one(h, align_corners, dtype, device).view(1, h, 1, 1) + grid_z = _linspace_from_neg_one(d, align_corners, dtype, device).view(d, 1, 1, 1) + grid_one = torch.ones((1, 1, 1, 1), dtype=dtype, device=device) + + # this is just a temporary hack and we should use torch.stack here once #104480 is merged + grid_x = torch.nn.functional.pad(grid_x, pad=(0, 3), mode="constant", value=0) + grid_y = torch.nn.functional.pad(grid_y, pad=(1, 2), mode="constant", value=0) + grid_z = torch.nn.functional.pad(grid_z, pad=(2, 1), mode="constant", value=0) + grid_one = torch.nn.functional.pad(grid_one, pad=(3, 0), mode="constant", value=0) + return grid_x + grid_y + grid_z + grid_one + + +def _affine_grid_generator_4d(theta: Tensor, size: List[int], align_corners: bool): + n, _, h, w = size + base_grid = _make_base_grid_4d(theta, h, w, align_corners=align_corners) + # base_grid shape is (h, w, 3) and theta shape is (n, 2, 3) + # We do manually a matrix multiplication which is faster than mm() + # (h * w, 3, 1) * (n, 1, 3, 2) -> (n, h * w, 2) + grid = (base_grid.view(-1, 3, 1) * theta.mT.unsqueeze(1)).sum(-2) + return grid.view(n, h, w, 2) + + +def _affine_grid_generator_5d(theta: Tensor, size: List[int], align_corners: bool): + n, _, d, h, w = size + base_grid = _make_base_grid_5d(theta, d, h, w, align_corners=align_corners) + # base_grid shape is (d, h, w, 4) and theta shape is (n, 3, 4) + # We do manually a matrix multiplication which is faster than mm() + # (d * h * w, 4, 1) * (n, 1, 4, 3) -> (n, h * w, 3) + grid = (base_grid.view(-1, 4, 1) * theta.mT.unsqueeze(1)).sum(-2) + return grid.view(n, d, h, w, 3) + + +@register_decomposition(aten.affine_grid_generator) +@out_wrapper() +@pw_cast_for_opmath +def affine_grid_generator(theta: Tensor, size: List[int], align_corners: bool): + torch._check( + len(size) in (4, 5), + lambda: "affine_grid_generator needs 4d (spatial) or 5d (volumetric) inputs.", + ) + if len(size) == 4: + return _affine_grid_generator_4d(theta, size, align_corners=align_corners) + else: + return _affine_grid_generator_5d(theta, size, align_corners=align_corners) + + +def _grid_sampler_2d( + a: Tensor, + grid: Tensor, + interpolation_mode: int = 0, + padding_mode: int = 0, + align_corners: bool = False, + _expand_grid: bool = True, +) -> Tensor: + # This method is a copy of grid_sampler_2d implementation and introduced with additional arg _expand_grid to + # optionally expand the input grid for performance reasons. + # Experimenting locally it was found that compiled CUDA code is accelerated by ~5x + # and CPU code by ~2x on bicubic mode, if we expand the grid from (N, H, W, 2) into (N, C, H, W, 2) + # However, this leads to a slowdown around ~0.8x on CPU bilinear mode, channels first. + # Thus we apply this hack to not expand the grid for this case. 
+ + torch._check( + interpolation_mode in (0, 1, 2), + lambda: f"Invalid interpolation mode {interpolation_mode}", + ) + torch._check( + padding_mode in (0, 1, 2), lambda: f"Invalid padding mode {padding_mode}" + ) + + def unnormalize(coords: Tensor, size: int) -> Tensor: + # Rescale coordinates from [-1, 1] to: + # [0, size - 1] if align_corners is True + # [-.5, size -.5] if align_corners is False + mul = (size * 0.5 - 0.5) if align_corners else (size * 0.5) + ofs = size * 0.5 - 0.5 + return coords * mul + ofs + + # Reflects coordinates until they fall between low and high (inclusive). + # The bounds are passed as twice their value so that half-integer values + # can be represented as ints. + def reflect_coordinates(coords: Tensor, twice_low: int, twice_high: int) -> Tensor: + if twice_low == twice_high: + return torch.zeros_like(coords) + coords_min = twice_low / 2 + coords_span = (twice_high - twice_low) / 2 + coords2 = (coords - coords_min).abs() + extra = torch.fmod(coords2, coords_span) + flips = (coords2 / coords_span).floor().to(dtype=torch.int8) + return torch.where( + flips & 1 == 0, extra + coords_min, coords_span + coords_min - extra + ) + + def compute_coordinates(coords: Tensor, size: int) -> Tensor: + if padding_mode == 0: # Zero + return coords + elif padding_mode == 1: # Borders + return torch.clamp(coords, 0, size - 1) + else: # padding_mode == 2, Reflection + if align_corners: + coords_reflected = reflect_coordinates(coords, 0, 2 * (size - 1)) + else: + coords_reflected = reflect_coordinates(coords, -1, 2 * size - 1) + return torch.clamp(coords_reflected, 0, size - 1) + + def compute_source_index(coords: Tensor, size: int) -> Tensor: + coords_un = unnormalize(coords, size) + return compute_coordinates(coords_un, size) + + N, C, iH, iW = a.shape + _, oH, oW, two = grid.shape + assert two == 2 + + if _expand_grid: + # Let's expand grid to [N, C, oH, oW, 2] + # This allows to generate a single triton cuda kernel instead of two kernels. 
+ # Two kernels are due source indices, weights have shape (N, 1, oH, oW), xnumel=N*oH*oW + # and output has shape (N, C, oH, oW), xnumel=N*C*oH*oW + # Expanding grid to (N, C, oH, oW, two) unifies xnumel to N*C*oH*oW + grid = grid.view(N, 1, oH, oW, two).expand(N, C, oH, oW, 2) + + def in_bounds_cond(xs: Tensor, ys: Tensor) -> Tensor: + return torch.logical_and( + 0 <= xs, torch.logical_and(xs < iW, torch.logical_and(0 <= ys, ys < iH)) + ) + + N_idx = torch.arange(N, device=a.device).view(N, 1, 1, 1) + C_idx = torch.arange(C, device=a.device).view(1, C, 1, 1) + + def clip(xs: Tensor, ys: Tensor, ws: Tensor) -> TensorSequenceType: + cond = in_bounds_cond(xs, ys) + # To clip to inside valid coordinates, we map the coordinates + # to (x, y) = (0, 0) and also set the weight to 0 + # We also change the shape of the tensor to the appropriate one for + # broadcasting with N_idx, C_idx for the purposes of advanced indexing + c = C if _expand_grid else 1 + return tuple( + torch.where(cond, t, 0).view(N, c, oH, oW) + for t in (xs.to(dtype=torch.int64), ys.to(dtype=torch.int64), ws) + ) + + def get_summand(ix: Tensor, iy: Tensor, w) -> Tensor: + # Perform clipping, index into input tensor and multiply by weight + idx_x, idx_y, w_ = clip(ix, iy, w) + return a[N_idx, C_idx, idx_y, idx_x] * w_ + + x = grid[..., 0] + y = grid[..., 1] + + if interpolation_mode == 0: # Bilinear + ix = compute_source_index(x, iW) + iy = compute_source_index(y, iH) + + ix_nw, iy_nw = ix.floor(), iy.floor() + ix_ne, iy_ne = ix_nw + 1, iy_nw + ix_sw, iy_sw = ix_nw, iy_nw + 1 + ix_se, iy_se = ix_ne, iy_sw + + w_nw = (ix_se - ix) * (iy_se - iy) + w_ne = (ix - ix_sw) * (iy_sw - iy) + w_sw = (ix_ne - ix) * (iy - iy_ne) + w_se = (ix - ix_nw) * (iy - iy_nw) + + return _sum_tensors( + get_summand(ix, iy, w) + for (ix, iy, w) in ( + (ix_nw, iy_nw, w_nw), + (ix_ne, iy_ne, w_ne), + (ix_sw, iy_sw, w_sw), + (ix_se, iy_se, w_se), + ) + ) + elif interpolation_mode == 1: # Nearest + ix = compute_source_index(x, iW) + iy = compute_source_index(y, iH) + + ix_nearest = ix.round() + iy_nearest = iy.round() + + return get_summand(ix_nearest, iy_nearest, 1) + else: # interpolation_mode == 2, Bicubic + ix = unnormalize(x, iW) + iy = unnormalize(y, iH) + + ix_nw = ix.floor() + iy_nw = iy.floor() + + tx = ix - ix_nw + ty = iy - iy_nw + + if not _expand_grid: + tx = tx.unsqueeze(1) + ty = ty.unsqueeze(1) + + def get_value_bounded(ix: Tensor, iy: Tensor) -> Tensor: + x = compute_coordinates(ix, iW) + y = compute_coordinates(iy, iH) + return get_summand(x, y, 1) + + def get_coeff(ofs: int) -> Tensor: + iy_ofs = iy_nw + (ofs - 1) + cs = ( + get_value_bounded(ix_nw - 1, iy_ofs), + get_value_bounded(ix_nw, iy_ofs), + get_value_bounded(ix_nw + 1, iy_ofs), + get_value_bounded(ix_nw + 2, iy_ofs), + ) + return _upsample_cubic_interp1d(cs, tx) + + coeffs = tuple(get_coeff(ofs) for ofs in range(4)) + return _upsample_cubic_interp1d(coeffs, ty) + + +@register_decomposition(aten.grid_sampler_2d) +@out_wrapper() +@pw_cast_for_opmath +def grid_sampler_2d( + a: Tensor, + grid: Tensor, + interpolation_mode: int = 0, + padding_mode: int = 0, + align_corners: bool = False, +) -> Tensor: + return _grid_sampler_2d( + a, + grid=grid, + interpolation_mode=interpolation_mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + + +@register_decomposition(aten.mv) +@out_wrapper() +@pw_cast_for_opmath +def mv(self, vec): + torch._check( + self.dim() == 2 and vec.dim() == 1, + lambda: f"matrix @ vector expected, got {self.dim()}, {vec.dim()}", + ) + torch._check( 
+ self.size(1) == vec.size(0), + lambda: f"size mismatch, got input ({self.size(0)}x{self.size(1)}), vec ({vec.size(0)})", + ) + return (self * vec).sum(dim=1) + + +@register_decomposition(aten.binary_cross_entropy_with_logits) +@out_wrapper() +def binary_cross_entropy_with_logits( + self, target, weight=None, pos_weight=None, reduction=Reduction.MEAN.value +): + max_val = (-self).clamp_min(0) + if pos_weight is not None: + log_weight = (pos_weight - 1) * target + 1 + loss = (1 - target) * self + log_weight * ( + ((-max_val).exp() + (-self - max_val).exp()).log() + max_val + ) + else: + loss = ( + (1 - target) * self + + max_val + + ((-max_val).exp() + (-self - max_val).exp()).log() + ) + + if weight is not None: + loss = loss * weight + + return apply_loss_reduction(loss, reduction) + + +def should_fold(tensor1: torch.Tensor, tensor2: torch.Tensor) -> bool: + # For comments of the logic of this function see eager in /native/LinearAlgebra.cpp + + t1, t2 = (tensor1, tensor2) if tensor1.ndim >= tensor2.ndim else (tensor2, tensor1) + + if not (t1.ndim >= 3 and t2.ndim <= 2): + return False + if t2.requires_grad: + return True + if tensor1.ndim == 2: + return False + if t1.numel() == 0: + return True + + t1_shape = t1.shape + t1_stride = t1.stride() + return all( + st1 == st2 * s2 + for (st1, st2, s2) in zip(t1_stride[:-2], t1_stride[1:-1], t1_shape[1:-1]) + ) + + +@aten.matmul.default.py_impl(DispatchKey.CompositeImplicitAutograd) +@out_wrapper() +def matmul(tensor1, tensor2): + dim_tensor1 = tensor1.dim() + dim_tensor2 = tensor2.dim() + assert dim_tensor1 != 0 and dim_tensor2 != 0 + if dim_tensor1 == 1 and dim_tensor2 == 1: + return torch.dot(tensor1, tensor2) + elif dim_tensor1 == 2 and dim_tensor2 == 1: + return torch.mv(tensor1, tensor2) + elif dim_tensor1 == 1 and dim_tensor2 == 2: + return torch.squeeze(torch.mm(torch.unsqueeze(tensor1, 0), tensor2), 0) + elif dim_tensor1 == 2 and dim_tensor2 == 2: + return torch.mm(tensor1, tensor2) + elif should_fold(tensor1, tensor2): + # dim_tensor1 >=3 && (dim_tensor2 == 1 || dim_tensor2 == 2) || + # dim_tensor2 >=3 && (dim_tensor1 == 1 || dim_tensor1 == 2) + # and some condition on the strides is fulfilled + + # optimization: use mm instead of bmm by folding the batch of the larger tensor + # into its leading matrix dimension + transpose = dim_tensor2 > dim_tensor1 + t1 = tensor2.mT if transpose else tensor1 + t2 = ( + tensor2 if not transpose else (tensor1.t() if dim_tensor1 == 2 else tensor1) + ) + # Invariant: t1.dim() >= 3 && (t2.dim() == 1 || t2.dim() == 2) + # and t1 and t2 are matmul-compatible + + # Why not t1.view(-1, sizes_1[-1])? + # If the last dim is 0, then view(-1, 0) won't work because the -1 becomes ambiguous. + # This can happen in e.g. [3, 5, 0] @ [0, 0]. + sizes_1 = t1.shape + output_shape = list(sizes_1[:-1]) + folded_dim1 = reduce(operator.mul, output_shape) + + # Readjust output_shape if we are multiplying by a matrix + t2_is_matrix = t2.dim() == 2 + if t2_is_matrix: + output_shape.append(t2.shape[1]) + + # This will almost always be a view. + # It may not be a view if t2->requires_grad(). See should_fold in aten/ for an explanation + t1_folded = t1.reshape(folded_dim1, sizes_1[-1]) + if t2_is_matrix: + # This copies if we perform a 2D @ 3D and the first tensor requires_grad + # See should_fold native/LinearAlgebra.cpp for why. 
+ output = t1_folded.mm(t2).view(output_shape) + return output.mT.contiguous() if transpose else output + else: + return t1_folded.mv(t2).view(output_shape) + + elif dim_tensor1 >= 1 and dim_tensor2 >= 1: + # We are multiplying b1 x n x m1 by x2 x m2 x p (where b1 can be a list); + # we track m1 vs m2 separately even though they must match for nicer error messages + n = tensor1.size(-2) if dim_tensor1 > 1 else 1 + m1 = tensor1.size(-1) + batch_tensor1 = tensor1.shape[:-2] + m2 = tensor2.size(-2) if dim_tensor2 > 1 else tensor2.size(-1) + p = tensor2.size(-1) if dim_tensor2 > 1 else 1 + + batch_tensor2: List[int] = [] + # TODO: handling of slice + for i in range(dim_tensor2 - 2): + batch_tensor2.append(tensor2.size(i)) + + # Same optimization for the gradients as that in should_fold + # If we're going to broadcast, we force it to go through the should_fold branch + if ( + dim_tensor1 == 3 + and dim_tensor2 == 3 + and batch_tensor1[0] != batch_tensor2[0] + ): + if batch_tensor1[0] == 1 and tensor1.requires_grad: + return matmul(tensor1.squeeze(0), tensor2) + if batch_tensor2[0] == 1 and tensor2.requires_grad: + return matmul(tensor1, tensor2.squeeze(0)) + + # expand the batch portion (i.e. cut off matrix dimensions and expand rest) + expand_batch_portion = list( + torch.broadcast_shapes(batch_tensor1, batch_tensor2) + ) + + tensor1_expand_size = expand_batch_portion + [n, m1] + + expand_batch_product = prod(expand_batch_portion) + + # HACK: We need reshape with symint support + tensor1_expanded = tensor1.expand(tensor1_expand_size).reshape( + expand_batch_product, n, m1 + ) + + vector_rhs = dim_tensor2 == 1 + if vector_rhs: + tensor2_expand_size = expand_batch_portion + [m2] + tensor2_expanded = ( + tensor2.expand(tensor2_expand_size) + .reshape(expand_batch_product, m2) + .unsqueeze(2) + ) + else: + tensor2_expand_size = expand_batch_portion + [m2, p] + tensor2_expanded = tensor2.expand(tensor2_expand_size).reshape( + expand_batch_product, m2, p + ) + + output_shape = expand_batch_portion + if dim_tensor1 > 1: + output_shape.append(n) + + if dim_tensor2 > 1: + output_shape.append(p) + + if vector_rhs: + return tensor1_expanded.bmm(tensor2_expanded).squeeze(-1).view(output_shape) + else: + return tensor1_expanded.bmm(tensor2_expanded).view(output_shape) + else: + torch._check(False, lambda: "both arguments to matmul need to be at least 1D") + + +@register_decomposition(aten.upsample_bicubic2d.default) +@pw_cast_for_opmath +def upsample_bicubic2d_default( + a: Tensor, + output_size: Tuple[int, int], + align_corners: bool, + scale_h: Optional[float] = None, + scale_w: Optional[float] = None, +) -> Tensor: + N, C, iH, iW = a.shape + oH, oW = output_size + + def compute_scale(in_size, out_size, align_corners, scale=None): + if align_corners: + return (in_size - 1) / (out_size - 1) if out_size > 1 else 0 + else: + return 1 / scale if scale is not None and scale > 0 else in_size / out_size + + def compute_source_index(scale, dst_index, align_corners): + if align_corners: + return scale * dst_index + else: + return scale * (dst_index + 0.5) - 0.5 + + height_scale = compute_scale(iH, oH, align_corners, scale_h) + width_scale = compute_scale(iW, oW, align_corners, scale_w) + + N_idx = torch.arange(N, device=a.device).view(N, 1, 1, 1) + C_idx = torch.arange(C, device=a.device).view(1, C, 1, 1) + out_y = torch.arange(oH, device=a.device).view((1, 1, oH, 1)) + out_x = torch.arange(oW, device=a.device).view((1, 1, 1, oW)) + + real_x = compute_source_index(width_scale, out_x, align_corners) + in_x = 
real_x.floor() + t_x = real_x - in_x + ix = in_x.to(dtype=torch.int64) + + real_y = compute_source_index(height_scale, out_y, align_corners) + in_y = real_y.floor() + t_y = real_y - in_y + iy = in_y.to(dtype=torch.int64) + + iys_ofs = (iy - 1, iy, iy + 1, iy + 2) + ixs_ofs = (ix - 1, ix, ix + 1, ix + 2) + + def load_bounded(ys, xs): + y_idx = torch.clamp(ys, 0, iH - 1) + x_idx = torch.clamp(xs, 0, iW - 1) + return aten._unsafe_index(a, [N_idx, C_idx, y_idx, x_idx]) + + def get_x_interp(y): + coeffs_x = tuple(load_bounded(y, x_ofs) for x_ofs in ixs_ofs) + return _upsample_cubic_interp1d(coeffs_x, t_x) + + coeffs_y = tuple(get_x_interp(y_ofs) for y_ofs in iys_ofs) + result = _upsample_cubic_interp1d(coeffs_y, t_y) + + # convert output to correct memory format, if necessary + memory_format = utils.suggest_memory_format(a) + result = result.contiguous(memory_format=memory_format) + return result + + +@register_decomposition(aten.upsample_bicubic2d.vec) +@aten.upsample_bicubic2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.upsample_bicubic2d.vec.py_impl(DispatchKey.Autograd) +@out_wrapper() +@pw_cast_for_opmath +def upsample_bicubic2d_vec( + a: Tensor, + output_size: Optional[Tuple[int, int]], + align_corners: bool, + scale_factors: Optional[Tuple[float, float]] = None, +) -> Tensor: + torch._check( + bool(output_size) + bool(scale_factors) == 1, + lambda: "Must specify exactly one of output_size and scale_factors.", + ) + if output_size is None: + assert scale_factors is not None + output_size = cast( + Tuple[int, int], + tuple( + sym_int(sym_float(w) * scale) + for w, scale in zip(a.shape[2:], scale_factors) + ), + ) + scale_h, scale_w = scale_factors if scale_factors else (None, None) + return upsample_bicubic2d_default(a, output_size, align_corners, scale_h, scale_w) + + +@register_decomposition(aten.aminmax) +@out_wrapper("min", "max") +def aminmax(self, *, dim=None, keepdim=False): + amin = torch.amin(self, dim=dim, keepdim=keepdim) + amax = torch.amax(self, dim=dim, keepdim=keepdim) + return amin, amax + + +@register_decomposition(aten.nansum) +@out_wrapper() +def nansum(self, dim=None, keepdim=False, *, dtype=None): + return aten.sum(torch.where(torch.isnan(self), 0, self), dim, keepdim, dtype=dtype) + + +@register_decomposition([aten.arange.default, aten.arange.out]) +@out_wrapper() +def arange_default( + end: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + pin_memory: bool = False, +): + return aten.arange.start_step( + 0, end, 1, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_decomposition([aten.arange.start]) +def arange_start( + start: NumberType, + end: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + pin_memory: bool = False, +): + return aten.arange.start_step( + start, end, 1, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory + ) + + +@register_decomposition(out_dtype) +def out_dtype_decomp(*args, **kwargs): + from torch._higher_order_ops.out_dtype import out_dtype_dense + + return out_dtype_dense(*args, **kwargs) + + +@register_decomposition(aten.multi_margin_loss) +@aten.multi_margin_loss.default.py_impl(DispatchKey.Autograd) +@out_wrapper() +def multi_margin_loss( + input: Tensor, + target: Tensor, + p: NumberType = 1, + margin: NumberType = 1, + weight: Optional[Tensor] = None, + reduction: int = Reduction.MEAN.value, +) -> Tensor: 
+ input = torch.atleast_2d(input) + target = torch.atleast_1d(target) + nframe = input.shape[0] + dim = input.shape[1] + torch._check(p == 1 or p == 2, lambda: "only p == 1 and p == 2 supported") + torch._check( + input.ndim == 2 and dim != 0, + lambda: f"Expected non-empty vector or matrix with optional 0-dim batch size, but got: {input.shape}", + ) + torch._check( + target.ndim == 1 and target.numel() == nframe, + lambda: f"inconsistent target size, expected {nframe} but got {target.shape}", + ) + if weight is not None: + weight = torch.atleast_1d(weight) + torch._check( + weight.ndim == 1 and weight.numel() == dim, # type: ignore[union-attr] + lambda: f"inconsistent weight size, expected {dim} but got {weight.shape}", # type: ignore[union-attr] + ) + target = target.unsqueeze(1) + u = torch.gather(input, dim=1, index=target) + z = margin - u + input + z = z.clamp_min(0) + z = z if p == 1 else z * z + if weight is not None: + z = z * weight[target] + idx = torch.arange(dim, device=input.device) + z = torch.where(idx != target, z, 0) + if reduction == Reduction.MEAN.value: + return z.mean() + elif reduction == Reduction.SUM.value: + return z.sum() / z.shape[1] + else: + return z.mean(dim=1) + + +@register_decomposition(aten.multilabel_margin_loss_forward) +@aten.multilabel_margin_loss_forward.default.py_impl(DispatchKey.Autograd) +@out_wrapper("output", "is_target") +def multilabel_margin_loss_forward( + input: Tensor, + target: Tensor, + reduction: int, +) -> Tuple[Tensor, Tensor]: + orig_input_shape = input.shape + orig_target_shape = target.shape + input = torch.atleast_2d(input) + target = torch.atleast_2d(target) + dim = input.shape[1] + torch._check( + len(orig_input_shape) <= 2 and dim != 0, + lambda: f"Expected non-empty vector or matrix with optional 0-dim batch size, but got: {orig_input_shape}", + ) + torch._check( + len(orig_target_shape) <= 2 and orig_target_shape == orig_input_shape, + lambda: f"inconsistent target size: {orig_target_shape} for input of size: {orig_input_shape}", + ) + # ignores labels after the first -1, detects when -1 is not present + idx = torch.arange(dim, device=target.device) + is_end = target == -1 + end_idx = torch.amin(torch.where(is_end, idx, dim), dim=-1, keepdim=True) + # target indices + target_mask = idx < end_idx + # masks target to be able to use gather, which doesn't allow -1 + tidx0 = torch.where(target_mask, target, 0) + u = torch.gather(input, dim=-1, index=tidx0) + # is_target + tidx1 = torch.where(target_mask, target, -1) + is_target = torch.any(idx == tidx1.unsqueeze(dim=-1), dim=1) + # loss + z = 1.0 - u.T.unsqueeze(dim=-1) + input + z = z.clamp_min(0) + z = z / dim + # masks loss + z = torch.where(is_target, 0, z) + # reduction + if reduction == Reduction.MEAN.value: + z = z.sum(dim=(0, -1)).mean() + elif reduction == Reduction.SUM.value: + z = z.sum() + else: + z = z.sum(dim=(0, -1)) + # result + is_target = is_target.to(input.dtype).reshape(orig_target_shape) + return z, is_target + + +# scaled_dot_product_attention used to be decomposed in pre-autograd, given that +# it calls _scaled_dot_product_attention_math and +# _scaled_dot_product_attention_math only has a CompositeImplicitAutograd +# kernel. As a result it's decomposed into ops with finer granularity. +# However recent PRs (#103826 #105131) added new logic in +# scaled_dot_product_attention and now it calls +# _scaled_dot_product_flash_attention which contains a CPU kernel. This results +# in _scaled_dot_product_flash_attention showing up in torch.export(). 
+# This decomposition ensures scaled_dot_product_attention is still decomposed +# the same way as before, i.e., going through +# _scaled_dot_product_attention_math. Notice that this decomp rule should be +# excluded by inductor. +@register_decomposition(aten._scaled_dot_product_flash_attention.default) +def scaled_dot_product_flash_attention( + query: Tensor, + key: Tensor, + value: Tensor, + dropout_p: float = 0.0, + is_causal: bool = False, + return_debug_mask: bool = False, + *, + scale: Optional[float] = None, +) -> Tuple[Tensor, Tensor, Tensor, Tensor, int, int, Tensor, Tensor, Tensor]: + dtype = query.dtype + batchSize, num_head, qSize, headSize = ( + query.shape[0], + query.shape[1], + query.shape[2], + query.shape[3], + ) + + torch._check( + torch.is_floating_point(query) and dtype is not torch.half, + lambda: f"query must be FP32, FP64, BF16 but got {query.dtype}", + ) + torch._check( + query.dim() == 4 and key.dim() == 4 and value.dim() == 4, + lambda: f"q, k, v must be a 4 dimensional tensor, got {query.dim()}, {key.dim()}, {value.dim()}", + ) + torch._check( + dropout_p == 0.0, lambda: f"dropout probability must be zero, got {dropout_p}" + ) + torch._check( + query.shape[3] == value.shape[3] and key.shape[3] == value.shape[3], + lambda: "q, k, v should have the same head size", + ) + torch._check( + return_debug_mask is False, lambda: "return_debug_mask is not supported." + ) + + logsumexp = torch.empty([batchSize, qSize, num_head, headSize], dtype=torch.float) + cum_seq_q, cum_seq_k = torch.empty([], dtype=torch.long), torch.empty( + [], dtype=torch.long + ) + max_q, max_k = 0, 0 + philox_seed, philox_offset = torch.empty([], dtype=torch.long), torch.empty( + [], dtype=torch.long + ) + debug_attn_mask = torch.empty( + [], + dtype=query.dtype, + device=query.device, + requires_grad=query.requires_grad, + ) + output, _ = aten._scaled_dot_product_attention_math.default( + query, key, value, None, dropout_p, is_causal, None, scale=scale + ) + # Why this change? + # In pre-dispatch export scaled_dot_product_attention is executed via + # * flash_attention. + # flash_attention allocates output tensor as (N, L, H, E) + # it then transposes that to get (N, H, L, E) which is supposed to be the return + # tensor dim for scaled_dot_product_attention + # assume x: [N, H, L, E] is the output sdpa + # In MHA code, this output is then permuted via (2, 0, 1, 3) to get + # (L, N, H, E) dim tensor + # x = x.permute(2, 0, 1, 3).contiguous() and the viewed via + # x = x.view(L * N, H * E) + # During pre autograd dispatch call to contiguous is not traced because + # flash_attention output after the x.permute is already contiguous + # on which the view is valid + # However, during 2nd stage export, post-dispatch, we run _match variant + # instead of flash* to get the decomposition. _match variant returns + # x: [N, H, L, E] applying x.permute(2, 0, 1, 3) returns + # x: [L, N, H, E] and without converting this to contiguous tensor + # subsequent view is not valid and the export fails + # solution is to maintain the return tensor view from the decomp to be + # exactly same as *flash* variant. + # flash variants output is contiguous as [N, L, H, E] + # _match variant out is contiguous as [N, H, L, E] + # out = out.transpose(1, 2).contiguous gets output as contiguous + # in [N, L, H, E]. 
+ # Subsrequent transpose(1, 2) then returns a view on which + # aforementioned code snippet, as showm below, is valid + # x = x.permute(2, 0, 1, 3).contiguous() and the viewed via + # x = x.view(L * N, H * E) + + # Really the invariant you want to maintain is: + # pre-dispatch op-output and its decomposed representation must + # return tensor with same view and dims + output = output.transpose(1, 2).contiguous(memory_format=torch.contiguous_format) + return ( + output.transpose(1, 2), + logsumexp, + cum_seq_q, + cum_seq_k, + max_q, + max_k, + philox_seed, + philox_offset, + debug_attn_mask, + ) + + +def register_inplace(aten_op, outplace_op): + @register_decomposition(aten_op) + def inplace_op(*args, **kwargs): + out = outplace_op(*args, **kwargs) + return args[0].copy_(out) + + return inplace_op + + +@register_decomposition([aten.baddbmm]) +@out_wrapper() +@pw_cast_for_opmath +def baddbmm(self, batch1, batch2, beta=1, alpha=1): + if not self.is_floating_point() and not self.is_complex(): + beta = int(beta) + alpha = int(alpha) + result = torch.bmm(batch1, batch2) + if not isinstance(alpha, numbers.Number) or alpha != 1: + result = result * alpha + if beta == 0: + return result + if not isinstance(beta, numbers.Number) or beta != 1: + self = self * beta + return self + result + + +@register_decomposition(aten.floor_divide) +@out_wrapper() +def floor_divide(self, other): + return torch.div(self, other, rounding_mode="floor") + + +@register_decomposition([aten.sum.default, aten.sum.out]) +def sum_default( + self: Tensor, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> Tensor: + if out is None: + return aten.sum.dim_IntList(self, [], dtype=dtype) + else: + return aten.sum.IntList_out(self, [], dtype=dtype, out=out) + + +@register_decomposition([aten.squeeze.default, aten.squeeze.dim]) +def squeeze_default(self: Tensor, dim: Optional[int] = None): + if dim is None: + return aten.squeeze.dims(self, list(range(self.dim()))) + else: + return aten.squeeze.dims(self, [dim]) + + +@register_decomposition(torch.ops.aten._weight_norm_interface) +def _weight_norm_interface(x, y, dim): + # https://github.com/pytorch/pytorch/blob/852f8526c52190125446adc9a6ecbcc28fb66182/aten/src/ATen/native/WeightNorm.cpp#L58 + keep_dim = tuple(i for i in range(len(x.shape)) if i != dim) + norm = x.norm(2, keep_dim, keepdim=True) + return x * (y / norm), norm + + +register_inplace(aten.addbmm_, aten.addbmm) +register_inplace(aten.addmm_, aten.addmm) +register_inplace(aten.addmv_, aten.addmv) +register_inplace(aten.baddbmm_, aten.baddbmm) +register_inplace(aten.fill_, aten.fill) +register_inplace(aten.gelu_, aten.gelu) +register_inplace(aten.hardswish_, aten.hardswish) +register_inplace(aten.hardtanh_, aten.hardtanh) +register_inplace(aten.hardsigmoid_, aten.hardsigmoid) +register_inplace(aten.__iand__, aten.__and__) +register_inplace(aten.__ilshift__, aten.__lshift__) +register_inplace(aten.index_put_, aten.index_put) +register_inplace(aten.index_reduce_, aten.index_reduce) +register_inplace(aten.__ior__, aten.__or__) +register_inplace(aten.__irshift__, aten.__rshift__) +register_inplace(aten.__ixor__, aten.__xor__) +register_inplace(aten.leaky_relu_, aten.leaky_relu) +register_inplace(aten.logit_, aten.logit) +register_inplace(aten.relu_, aten.relu) +register_inplace(aten.renorm_, aten.renorm) +register_inplace(aten.round_, aten.round) +register_inplace(aten.scatter_, aten.scatter) +register_inplace(aten.scatter_add_, aten.scatter_add) +register_inplace(aten.scatter_reduce_, 
aten.scatter_reduce) +register_inplace(aten.silu_, aten.silu) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py new file mode 100644 index 0000000000000000000000000000000000000000..19dfaedcce31d6d938b267a071f191062887a020 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py @@ -0,0 +1,302 @@ +import inspect +from typing import Callable, Dict, List, Optional, Tuple + +import torch +import torch._decomp +from torch import Tensor +from torch._prims_common.wrappers import _maybe_remove_out_wrapper + +decomposition_table = torch._decomp.decomposition_table +decomposition_table_for_jvp: Dict[torch._ops.OperatorBase, Callable] = {} +register_decomposition = torch._decomp.register_decomposition +aten = torch.ops.aten + +# NOTE: [forward-mode AD decompositions mechanism] +# +# The mechanism is in VariableType, +# IF any inputs have forward grad +# AND there is no forward AD formula implemented +# AND the functions is actually differentiable +# run the decomposition +# See run_jit_decomposition_with_args_for_jvp +# We currently use python decompositions that we torchscript. +# +# Note that we would be building the backward graph at the decomposed level +# too, but that is OK, because we would've errored out otherwise anyway. +# +# TODO: The mechanism we are using to register decompositions doesn't +# seem to be exclusively used for jvp. So open question here is whether +# torch/csrc/jit/runtime/decomposition_registry.cpp is being used for other things. +# If that is the case, we may go down the decomposition path unexpectedly +# (and possibly produce an unintelligible error) vs erroring out earlier and +# printing that the forward AD formula is not implemented. +# +# The solution to this may be to have a explicitly white list control when +# to enable the decomposition. + + +def maybe_register_decomposition(op): + def decorator(f): + try: + return register_decomposition(op)(f) + except Exception: + return f + + return decorator + + +# Functions where we need a special decomposition for jvp but there's another version that +# should be used more generally (ex. for jvp we need to recompute the mean and variance for +# the backwards of a normalization function. Without jvp, it should use the saved value) +decomposition_table_for_jvp = {} + + +def register_decomposition_for_jvp(fn): + return register_decomposition(fn, registry=decomposition_table_for_jvp) + + +def _register_jit_decomposition_for_jvp(decomp, use_python=False): + if decomp in decomposition_table_for_jvp: + decomposition_table_used = decomposition_table_for_jvp + elif decomp in decomposition_table: + decomposition_table_used = decomposition_table + else: + raise RuntimeError(f"could not find decomposition for {decomp}") + decomp_fn = decomposition_table_used[decomp] + + # `out_wrapper` extends a decompositions signature with + # an `out` parameter. However jit will use the unwrapped function's + # signature instead so we need to unwrap here to prevent an error + decomp_fn = _maybe_remove_out_wrapper(decomp_fn) + + if use_python: + decomp_fn = torch.jit.ignore(decomp_fn) + sig = inspect.signature(decomp_fn) + + # Create a string wrapping the function from the signature + # example output: + # def wrapped_decomp(x: torch.Tensor, y: int, z: int): + # return decomp_fn(x, y, z) + # Thanks copilot! 
+ def get_function_def(sig): + param_def = [f"{param_str}" for param_str in sig.parameters.values()] + param_use = [f"{param_str}" for param_str in sig.parameters.keys()] + + return f"def wrapped_decomp({', '.join(param_def)}):\n return decomp_fn({', '.join(param_use)})\n" + + f_str = get_function_def(sig) + graph = torch.jit.CompilationUnit(f_str).wrapped_decomp.graph + else: + graph = torch.jit.script(decomp_fn).graph + torch.jit._register_decomposition(decomp, graph) + + +# The only decompositions here are temporary or hacks for the purposes of jvp + + +# TODO: do these also belong here? +@maybe_register_decomposition(aten.trace.default) +def trace(self: Tensor) -> Tensor: + return torch.sum(torch.diag(self)) + + +@maybe_register_decomposition(aten.log_sigmoid_forward.default) +def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]: + min = torch.minimum(self.new_zeros(()), self) + z = torch.exp(-torch.abs(self)) + if self.is_cuda: + buffer = self.new_zeros((0,)) + else: + buffer = z + return min - torch.log1p(z), buffer + + +def recompute_mean_var( + input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool +): + # for most norm decompositions, it will be the same as the core version except for here. + # We recompute the mean and variance so that they track gradients through input + + mean = torch.mean(input, dim=inner_dim_indices, keepdim=keepdim) + var = torch.var(input, dim=inner_dim_indices, unbiased=False, keepdim=keepdim) + eps = torch.pow(1 / rstd, 2) - var # this makes me so sad inside + eps = eps.detach() + rstd = 1 / torch.sqrt(var + eps) + return mean, rstd + + +@register_decomposition_for_jvp(aten.native_layer_norm_backward) +def native_layer_norm_backward( + grad_out: Tensor, + input: Tensor, + normalized_shape: List[int], + mean: Tensor, + rstd: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + output_mask: List[bool], +) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: + input_shape = input.shape + input_ndim = input.dim() + + axis = input_ndim - len(normalized_shape) + inner_dims = input_shape[axis:] + outer_dims = input_shape[:axis] + inner_dim_indices = list(range(axis, input_ndim)) + outer_dim_indices = list(range(0, axis)) + + N = 1 + for i in inner_dims: + N *= i + M = 1 + for i in outer_dims: + M *= i + if M <= 0 or N <= 0: + return ( + input.new_zeros(input_shape), + input.new_zeros(input_shape[axis:]), + input.new_zeros(input_shape[axis:]), + ) + + mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True) + + x_hat = (input - mean_) * rstd_ + if weight is not None: + grad_x_hat = grad_out * weight + else: + grad_x_hat = grad_out + a = grad_x_hat * N + b = torch.sum(grad_x_hat, inner_dim_indices, True) + c1 = torch.mul(grad_x_hat, x_hat) + c2 = torch.sum(c1, inner_dim_indices, True) + c3 = torch.mul(x_hat, c2) + inner = a - b - c3 + + if output_mask[0]: + d_input: Optional[Tensor] = (rstd_ / N) * inner + else: + d_input = torch.zeros_like(input) # should be None but doesn't work with vjp + + if output_mask[1] and weight is not None: + if len(outer_dim_indices) > 0: + d_weight: Optional[Tensor] = torch.sum( + grad_out * x_hat, outer_dim_indices, False + ) + else: + d_weight = grad_out * x_hat + elif weight is not None: + d_weight = torch.zeros_like(weight) # should be None but doesn't work with vjp + else: + d_weight = torch.zeros(()) # should be None but doesn't work with vjp + + if output_mask[2] and bias is not None: + if len(outer_dim_indices) > 0: + d_bias: Optional[Tensor] = 
torch.sum(grad_out, outer_dim_indices, False) + else: + d_bias = grad_out.clone() + elif bias is not None: + d_bias = torch.zeros_like(bias) # should be None but doesn't work with vjp + else: + d_bias = torch.zeros(()) # should be None but doesn't work with vjp + + return (d_input, d_weight, d_bias) + + +def prod(x: List[int]): + r = 1 + for i in x: + r *= i + return r + + +@register_decomposition_for_jvp(aten.native_batch_norm_backward) +def native_batch_norm_backward( + grad_out: Tensor, + input: Tensor, + weight: Optional[Tensor], + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + save_mean: Optional[Tensor], + save_invstd: Optional[Tensor], + train: bool, + eps: float, + output_mask: List[bool], +) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: + input_shape = input.shape + input_rank = input.dim() + assert input_rank >= 2, "rank of the input must be at least 2" + + axis = 1 + num_features = prod(input_shape) / input_shape[axis] # type: ignore[arg-type] + mean = save_mean + invstd = save_invstd + if train: + assert ( + save_mean is not None and save_invstd is not None + ), "when train=True, save_mean and save_invstd are required" + + reduciton_dims = [0] + list(range(2, input.dim())) + assert invstd is not None # for typing + mean, invstd = recompute_mean_var(input, invstd, reduciton_dims, keepdim=False) + else: + assert running_mean is not None and running_var is not None + mean = running_mean + invstd = torch.rsqrt(running_var + eps) + + assert invstd is not None and mean is not None + + broadcast_mask = [1] * input_rank + broadcast_mask[axis] = input_shape[axis] + + reduction_axes: List[int] = [] + for i in range(input_rank): + if i != axis: + reduction_axes.append(i) + + mean = torch.reshape(mean, broadcast_mask) + norm = 1.0 / num_features + grad_output_sum = torch.sum(grad_out, reduction_axes) + dot_p = torch.sum(grad_out * (input - mean), reduction_axes) + + grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask) + proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask) + + if weight is None: + grad_scale = torch.reshape(invstd, broadcast_mask) * 1.0 + else: + grad_scale = torch.reshape(invstd * weight, broadcast_mask) + + if train: + proj = (input - mean) * proj_scale + grad_input = ((grad_out - proj) - grad_mean) * grad_scale + else: + grad_input = grad_out * grad_scale + + if output_mask[1]: + grad_weight = dot_p * invstd + elif weight is not None: + grad_weight = torch.zeros_like( + weight + ) # should be None but doesn't work with vjp + else: + grad_weight = torch.zeros(()) # should be None but doesn't work with vjp + + if output_mask[2]: + grad_bias = grad_output_sum + else: + grad_bias = torch.zeros_like( + grad_output_sum + ) # should be None but doesn't work with vjp + + return (grad_input, grad_weight, grad_bias) + + +_register_jit_decomposition_for_jvp(torch.ops.aten.trace.default, use_python=True) +_register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss_backward.default) +_register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss2d_backward.default) +_register_jit_decomposition_for_jvp(torch.ops.aten._log_softmax_backward_data.default) +_register_jit_decomposition_for_jvp(torch.ops.aten._softmax_backward_data.default) +_register_jit_decomposition_for_jvp(torch.ops.aten.log_sigmoid_forward.default) +_register_jit_decomposition_for_jvp(torch.ops.aten.native_layer_norm_backward.default) +_register_jit_decomposition_for_jvp(torch.ops.aten.native_batch_norm_backward.default) 
+_register_jit_decomposition_for_jvp(torch.ops.aten.cudnn_batch_norm_backward.default) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa762351171a25a39a03ff23a20adc49bba66a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py @@ -0,0 +1,263 @@ +import functools +from collections import defaultdict +from typing import Callable, Dict + +import torch +import torch._decomp as decomp +from torch._decomp import get_decompositions +from torch._ops import OpOverload + +aten = torch.ops.aten + +rng_decompositions: Dict[str, Dict[OpOverload, Callable]] = defaultdict(dict) + + +def register_rng_decomposition(aten_op): + return decomp.register_decomposition(aten_op, rng_decompositions) + + +def throw_on_non_cuda(device): + raise RuntimeError( + f"You are trying to functionalize a {device.type} RNG operator but {device.type} does not " + f"use Philox/counter-based RNG. Therefore, functionalizing a {device.type} RNG operator is " + "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU." + ) + + +# TODO - We have to register many more distributions here, and also higher level +# ops like dropout which have fused implementation and can hide the rand inside. +@register_rng_decomposition(aten.rand) +def rand(shape, dtype=None, layout=torch.strided, device=None, pin_memory=False): + if device and device.type != "cuda": + throw_on_non_cuda(device) + seed, offset = PhiloxStateTracker.get_state_as_tuple() + dtype = dtype or torch.float32 + out, offset_jump = torch.ops.rngprims.philox_rand( + shape, seed, offset, None, device, dtype + ) + PhiloxStateTracker.advance_offset(offset_jump) + return out + + +@register_rng_decomposition(aten.rand_like) +def rand_like( + x: torch.Tensor, + dtype=None, + layout=None, + device=None, + pin_memory=False, + memory_format=torch.preserve_format, +): + device = device or x.device + if device.type != "cuda": + throw_on_non_cuda(device) + dtype = dtype or x.dtype + seed, offset = PhiloxStateTracker.get_state_as_tuple() + out, offset_jump = torch.ops.rngprims.philox_rand( + x.shape, seed, offset, None, device, dtype + ) + PhiloxStateTracker.advance_offset(offset_jump) + return out + + +class PhiloxState: + """ + Represents a PhiloxRngState - (seed, offset) where offset = base_offset + + relative_offset. seed and base_offset basically point to the rng state just + before tracing starts. relative offset tracks the totally consumed offset at + trace time. + """ + + def __init__(self): + self.reset() + + def reset(self): + self.seed = torch.tensor(()) + self.base_offset = torch.tensor(()) + self.relative_offset = 0 + self.offset_advanced_alteast_once = False + + def validate_state(self): + assert self.seed.numel() != 0 and self.base_offset.numel() != 0 + + def advance_offset(self, consumed_offset): + self.offset_advanced_alteast_once = True + self.relative_offset = self.relative_offset + consumed_offset + + def set_state(self, seed, base_offset, relative_offset=0): + self.seed = seed + self.base_offset = base_offset + self.relative_offset = relative_offset + + def get_state_as_tuple(self): + self.validate_state() + return (self.seed, self.base_offset + self.relative_offset) + + def get_state_as_tensor(self): + # Only needed because we override get_rng_state. 
+ self.validate_state() + return torch.stack([self.seed, self.base_offset + self.relative_offset]) + + def set_state_from_tensor(self, state): + # Only needed because we override set_rng_state. + self.seed, self.base_offset = torch.unbind(state) + self.relative_offset = 0 + + +class PhiloxStateTracker: + """ + Singleton class to track the philox rng state during AOT Autograd tracing. + For each aot tracing instance, AOT Autograd resets this tracker and keeps + track of both forward and backward offsets. At runtime, we only care about + the total consumed forward and backward offsets. For dynamic shapes, these + offsets are a function of input shapes. Therefore, the AOT generated graphs + have additional outputs that compute total consumed forward and backward + offsets. + """ + + running_state: PhiloxState + fwd_state: PhiloxState + bwd_state: PhiloxState + + def __enter__(self): + PhiloxStateTracker.reset() + return self + + def __exit__(self, exc_type, exc_cal, exc_tb): + PhiloxStateTracker.reset() + + @classmethod + def reset(cls): + cls.running_state = PhiloxState() + cls.fwd_state = PhiloxState() + cls.bwd_state = PhiloxState() + + @classmethod + def mark_beginning_of_forward(cls): + # Tells the tracker to use fwd_state as the running state + cls.running_state = cls.fwd_state + + @classmethod + def mark_beginning_of_backward(cls): + # Tells the tracker to use bwd_state as the running state + cls.running_state = cls.bwd_state + + @classmethod + def record_state(cls, seed, offset, mode): + # Records the seed and offset tensors. These tensors are used to invoke + # the philox_rand functional primitives. + if mode == "forward": + cls.fwd_state.set_state(seed, offset) + cls.mark_beginning_of_forward() + else: + assert mode == "backward" + cls.bwd_state.set_state(seed, offset) + + @classmethod + def get_state_as_tensor(cls): + # The only reason this exists is because we override get_rng_state and + # set_rng_state during tracing. get_rng_state expects a tensor output, + # so return (seed, offset) tuple upset other parts of the program like + # ctx.saved_tensors. + + # A bad consequence is that if user saves and restores rng state, we + # have little bit of ugliness in the generated code, where we first + # concat the (seed, offset) to create a tensor for get_rng_state, and + # then split it back to get (seed, offset) tuple in set_rng_state. + + # TODO: Investigate if there is be a better way to wrap the tuple in a + # false Tensor object, and then desugar it later on. + return cls.running_state.get_state_as_tensor() + + @classmethod + def get_state_as_tuple(cls): + return cls.running_state.get_state_as_tuple() + + @classmethod + def set_state_from_tensor(cls, x): + # This is only needed because we override set_rng_state. Look at the + # comment in get_state_from_tensor method. + cls.running_state.set_state_from_tensor(x) + + @classmethod + def advance_offset(cls, consumed_offset): + cls.running_state.advance_offset(consumed_offset) + + @classmethod + def get_current_relative_offset(cls): + return cls.running_state.relative_offset + + @staticmethod + def multiple_of_4(offset): + # torch cuda rng state offset must be a multiple of 4. For inductor, as + # we sum up all the numel, the result might not be a multiple of 4. This + # method achieves that. 
+ return (offset + 3) // 4 * 4 + + @classmethod + def get_updated_fwd_offset(cls): + # Short circuit if no rand ops were observed + if not cls.fwd_state.offset_advanced_alteast_once: + return cls.fwd_state.base_offset + return cls.multiple_of_4( + cls.fwd_state.base_offset + cls.fwd_state.relative_offset + ) + + @classmethod + def get_updated_bwd_offset(cls): + # Short circuit if no rand ops were observed + if not cls.bwd_state.offset_advanced_alteast_once: + return cls.bwd_state.base_offset + return cls.multiple_of_4( + cls.bwd_state.base_offset + cls.bwd_state.relative_offset + ) + + +# Adding more decompositions which eventually use rand_like inside decomps. +# Adding these in rng_decompositions ensures the functionalization of rand_like +# ops used in these decomps. The list is copied from inductor codebase, which +# uses it for similar purpose. +# +# Caution - These decomps do not have same accuracy as that of eager. However, +# we can't just disable them with a config flag like fallback_random, because +# for functionalization of rng ops, we have to decompose these ops. +extra_random_decomps = get_decompositions( + [ + aten.cauchy, + aten.cauchy_, + aten.exponential, + aten.exponential_, + aten.geometric, + aten.geometric_, + aten.native_dropout, + aten.normal, + aten.normal_, + aten.normal_functional, + aten.log_normal, + aten.log_normal_, + aten.rrelu_with_noise, + aten.rrelu_with_noise_, + aten.uniform_, + ] +) +register_extra_random_decomp = functools.partial( + decomp.register_decomposition, registry=extra_random_decomps +) + + +@register_extra_random_decomp([aten.bernoulli_]) +def bernoulli_(self, p=0.5): + if self.device == torch.device("cpu"): + return NotImplemented + return self.copy_(torch.rand_like(self, dtype=torch.float32) < p) + + +@register_extra_random_decomp([aten.bernoulli.p]) +def bernoulli_p(self, p=0.5, *, generator=None): + if self.device == torch.device("cpu"): + return NotImplemented + assert generator is None + return torch.rand_like(self, dtype=torch.float32) < p + + +rng_decompositions.update(extra_random_decomps) # type: ignore[arg-type] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b38e98a2f0f6f54b41fe80628c0a9df5d5933bd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29a92912e8186d6619610a7bcbb339704230c270 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/python.py b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/python.py new file mode 100644 index 0000000000000000000000000000000000000000..d80839dc7e4729b948914991c9ddf8e7f3e01cb6 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/_dispatch/python.py @@ -0,0 +1,178 @@ +import itertools +import unittest.mock +from contextlib import contextmanager +from typing import Iterator + +import torch +import torch._C +import torch._ops +import torch.utils._python_dispatch +import torch.utils._pytree as pytree + +__all__ = ["enable_python_dispatcher", "no_python_dispatcher", "enable_pre_dispatch"] + +no_python_dispatcher = torch._C._DisablePythonDispatcher +enable_python_dispatcher = torch._C._EnablePythonDispatcher +enable_pre_dispatch = torch._C._EnablePreDispatch + +CROSSREF_FUNCTIONALIZE = False + + +def all_py_loaded_overloads() -> Iterator[torch._ops.OpOverload]: + """ + Warning: the set of overloads this will report is very subtle. It is precisely + the set of torch.ops functions that have actually been accessed from Python + (e.g., we actually called torch.ops.aten.blah at some point. This is DIFFERENT + from the set of registered operators, which will in general be a larger set, + as this would include all operators which we ran C++ static initializers or + Python operator registration on. This does not eagerly populate the list on + torch.ops.aten; this list is lazy! + + In other words, this is good for traversing over everything that has an + OpOverload object allocated in Python. We use it for cache invalidation, but + don't rely on this list being complete. + + Note that even if we did report all C++ registered overloads, this isn't guaranteed + to be complete either, as a subsequent lazy load of a library which triggers more + registrations could add more things to the set. + """ + for ns in torch.ops: + packets = getattr(torch.ops, ns) + for op_name in packets: + packet = getattr(packets, op_name) + for overload in packet: + yield getattr(packet, overload) + + +@contextmanager +def suspend_functionalization(): + f_tls = torch._C._dispatch_tls_is_dispatch_key_included( + torch._C.DispatchKey.Functionalize + ) + f_rv = torch._C._functionalization_reapply_views_tls() + if f_tls: + torch._disable_functionalization() + try: + yield + finally: + if f_tls: + torch._enable_functionalization(reapply_views=f_rv) + + +def check_tensor_metadata_matches(nv, rv, desc): + assert callable(desc) + assert nv.size() == rv.size(), f"{desc()}: sizes {nv.size()} != {rv.size()}" + assert nv.dtype == rv.dtype, f"{desc()}: dtype {nv.dtype} != {rv.dtype}" + same_strides, idx = torch._prims_common.check_significant_strides( + nv, rv, only_cuda=False + ) + assert ( + same_strides + ), f"{desc()}: strides {nv.stride()} != {rv.stride()} (mismatch at index {idx})" + + +def check_metadata_matches(n, r, desc): + assert callable(desc) + n_vals, n_spec = pytree.tree_flatten(n) + r_vals, r_spec = pytree.tree_flatten(r) + # TODO: test the specs match; empirically sometimes we have a tuple + # on one side and a list on the other + assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}" + for i, nv, rv in zip(range(len(n_vals)), n_vals, r_vals): + if not isinstance(rv, torch.Tensor): + continue + check_tensor_metadata_matches(nv, rv, lambda: f"{desc()} output {i}") + + +class Lit: + def __init__(self, s): + self.s = s + + def __repr__(self): + return self.s + + +def _fmt(a: object) -> object: + if isinstance(a, torch.Tensor): + return Lit( + f"torch.empty_strided({tuple(a.size())}, {a.stride()}, dtype={a.dtype})" + ) + else: + return a + + +def make_crossref_functionalize(op, final_key): + from torch._subclasses.fake_tensor import FakeTensorMode + + # This case is pretty weird, suppress it 
for now + if op == torch.ops.aten.lift_fresh.default: + return final_key + + def handler(*args, **kwargs): + fake_mode = FakeTensorMode() + + def fakeify_defun(t): + if isinstance(t, torch.Tensor): + if torch._is_functional_tensor(t): + r = torch._from_functional_tensor(t) + # NB: This assumes that the inner tensor sizes/strides match + # the outer tensor sizes/strides. This doesn't necessarily have to + # be the case, see discussion at + # https://github.com/pytorch/pytorch/pull/87610/files/401ddeda1d769bedc88a12de332c7357b60e51a4#r1007264456 + assert t.size() == r.size() + assert t.stride() == r.stride() + else: + r = t + # TODO: suppress guards + return fake_mode.from_tensor(r) + return t + + def maybe_detach(t): + if isinstance(t, torch.Tensor): + return t.detach() + else: + return t + + # TODO: This probably does the wrong thing if you're running other + # substantive modes with the normal op outside here + with torch.utils._python_dispatch._disable_current_modes(), suspend_functionalization(): + f_args, f_kwargs = pytree.tree_map(fakeify_defun, (args, kwargs)) + orig_f_args, orig_f_kwargs = pytree.tree_map( + maybe_detach, (f_args, f_kwargs) + ) + with fake_mode: + f_r = op(*f_args, **f_kwargs) + r = op._op_dk(final_key, *args, **kwargs) + + def desc(): + fmt_args = ", ".join( + itertools.chain( + (repr(pytree.tree_map(_fmt, a)) for a in orig_f_args), + ( + f"{k}={pytree.tree_map(_fmt, v)}" + for k, v in orig_f_kwargs.items() + ), + ) + ) + return f"{op}({fmt_args})" + + check_metadata_matches(f_r, r, desc) + return r + + return handler + + +# NB: enabling this is slow, don't do it in a hot loop. This is purely +# for debugging purposes. +@contextmanager +def enable_crossref_functionalize(): + for op in all_py_loaded_overloads(): + op._uncache_dispatch(torch._C.DispatchKey.Functionalize) + try: + with enable_python_dispatcher(), unittest.mock.patch( + "torch._dispatch.python.CROSSREF_FUNCTIONALIZE", True + ): + yield + finally: + for op in all_py_loaded_overloads(): + op._uncache_dispatch(torch._C.DispatchKey.Functionalize) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh b/env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e4b1dc5f8da32e759c4df8c080b8e7630ca7c4c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh @@ -0,0 +1,3 @@ +#pragma once +// TODO: Remove once torchvision has been updated to use the ATen header +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh b/env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..03294d0c846ed085e973ba7ae7a54d08dee1963b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh @@ -0,0 +1,3 @@ +#pragma once +// TODO: Remove this header +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..14fe876008d0e245127e84888d0d921a3f204667 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include + +#include +#include + +// TODO: rename to c10 +C10_DECLARE_bool(caffe2_report_cpu_memory_usage); + +namespace 
c10 { + +using MemoryDeleter = void (*)(void*); + +// A helper function that is basically doing nothing. +C10_API void NoDelete(void*); + +// A simple struct that is used to report C10's memory allocation, +// deallocation status and out-of-memory events to the profiler +class C10_API ProfiledCPUMemoryReporter { + public: + ProfiledCPUMemoryReporter() = default; + void New(void* ptr, size_t nbytes); + void OutOfMemory(size_t nbytes); + void Delete(void* ptr); + + private: + std::mutex mutex_; + std::unordered_map size_table_; + size_t allocated_ = 0; + size_t log_cnt_ = 0; +}; + +C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter(); + +// Get the CPU Allocator. +C10_API at::Allocator* GetCPUAllocator(); +// Sets the CPU allocator to the given allocator: the caller gives away the +// ownership of the pointer. +C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0); + +// Get the Default CPU Allocator +C10_API at::Allocator* GetDefaultCPUAllocator(); + +// Get the Default Mobile CPU Allocator +C10_API at::Allocator* GetDefaultMobileCPUAllocator(); + +// The CPUCachingAllocator is experimental and might disappear in the future. +// The only place that uses it is in StaticRuntime. +// Set the CPU Caching Allocator +C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0); +// Get the CPU Caching Allocator +C10_API Allocator* GetCPUCachingAllocator(); + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h new file mode 100644 index 0000000000000000000000000000000000000000..c49763f69dc34c7dedeceb8f388b609ad2380bed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +namespace c10 { + +using CopyBytesFunction = void (*)( + size_t nbytes, + const void* src, + Device src_device, + void* dst, + Device dst_device); + +struct C10_API _CopyBytesFunctionRegisterer { + _CopyBytesFunctionRegisterer( + DeviceType from, + DeviceType to, + CopyBytesFunction func_sync, + CopyBytesFunction func_async = nullptr); +}; + +#define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) \ + namespace { \ + static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \ + g_copy_function)(from, to, __VA_ARGS__); \ + } + +/* + * WARNING: Implementations for this function are currently registered from + * ATen and caffe2, not yet from c10. Don't use this if not either ATen + * or caffe2 is present as well. + * We can't move them yet, because the CUDA implementations aren't unified yet + * between ATen and caffe2. + * We're planning to move the implementations into c10/backend/xxx + * to make c10 self contained again. + */ +C10_API void CopyBytes( + size_t nbytes, + const void* src, + Device src_device, + void* dst, + Device dst_device, + bool async); +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..bfe9af8fd5308a9cf1b9b05968df6f37f92eee69 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h @@ -0,0 +1,195 @@ +#pragma once + +#include + +namespace c10 { + +/// RAII guard that sets a certain default device in its constructor, and +/// changes it back to the device that was originally active upon destruction. 
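+///
+/// Illustrative usage sketch (editor's addition, not upstream documentation;
+/// the CUDA device index 1 below is an arbitrary example value):
+///
+///   {
+///     DeviceGuard guard(Device(DeviceType::CUDA, 1));
+///     // code here observes CUDA device 1 as the current device
+///   } // guard destructor restores the previously active device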
+/// +/// The device is always reset to the one that was active at the time of +/// construction of the guard. Even if you `set_device` after construction, the +/// destructor will still reset the device to the one that was active at +/// construction time. +/// +/// This device guard does NOT have an uninitialized state; it is guaranteed +/// to reset a device on exit. If you are in a situation where you *might* +/// want to setup a guard (i.e., are looking for the moral equivalent +/// of optional), see OptionalDeviceGuard. +class DeviceGuard { + public: + /// No default constructor; see Note [Omitted default constructor from RAII] + explicit DeviceGuard() = delete; + + /// Set the current device to the passed Device. + explicit DeviceGuard(Device device) : guard_(device) {} + + /// This constructor is for testing only. + explicit DeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + DeviceGuard(const DeviceGuard&) = delete; + DeviceGuard& operator=(const DeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + DeviceGuard(DeviceGuard&& other) = delete; + DeviceGuard& operator=(DeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + /// + /// TODO: The consistency check here is inconsistent with StreamGuard's + /// behavior with set_stream, where a stream on a different device than + /// the original one isn't an error; we just reset the stream and then + /// switch devices. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// This method is for testing only. + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type the guard was constructed with. + void set_index(DeviceIndex index) { + guard_.set_index(index); + } + + /// Returns the device that was set at the time the guard was constructed. + Device original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device. + Device current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineDeviceGuard guard_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * Morally, a OptionalDeviceGuard is equivalent to optional, but + * with extra constructors and methods as appropriate. + * + * Besides its obvious use (optionally applying a DeviceGuard), + * OptionalDeviceGuard is often also used for the following idiom: + * + * OptionalDeviceGuard g; + * for (const auto& t : tensors) { + * g.set_device(t.device()); + * do_something_with(t); + * } + * + * This usage is marginally more efficient than constructing a DeviceGuard every + * iteration of the for loop, as it avoids an unnecessary device reset. + * + * Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs + * when you use the nullary constructor, or pass a nullopt to the constructor. 
+ * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the + * original device was and they do not reset on destruction. This is why + * original_device() and current_device() return optional rather than + * Device (as they do in DeviceGuard), and also is why we didn't just + * provide OptionalDeviceGuard by default and hide DeviceGuard from users. + * + * The semantics of an OptionalDeviceGuard are exactly explained by thinking + * of it as an optional. In particular, an initialized + * OptionalDeviceGuard doesn't restore device to its value at construction; it + * restores device to its value *at initialization*. So if you have the + * program: + * + * setDevice(1); + * OptionalDeviceGuard g; + * setDevice(2); + * g.reset_device(Device(DeviceType::CUDA, 3)); // initializes! + * + * On destruction, g will reset device to 2, rather than 1. + * + * An uninitialized OptionalDeviceGuard is distinct from a (initialized) + * DeviceGuard whose original_device_ and current_device_ match, since the + * DeviceGuard will still reset the device to original_device_. + */ +class OptionalDeviceGuard { + public: + /// Create an uninitialized guard. Set the guard later using reset_device. + explicit OptionalDeviceGuard() = default; + + /// Initialize the guard, setting the current device to the passed Device. + explicit OptionalDeviceGuard(Device device) : guard_(device) {} + + /// Initialize the guard if a Device is passed; otherwise leave the + /// guard uninitialized. + explicit OptionalDeviceGuard(optional device) : guard_(device) {} + + /// Constructor for testing only. + explicit OptionalDeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + OptionalDeviceGuard(const OptionalDeviceGuard&) = delete; + OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete; + + /// Move is disallowed + /// See Note [Explicit initialization of optional fields] + /// and // Note [Move construction for RAII guards is tricky] + /// for rationale. + OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete; + OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// For testing only + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Returns the device that was set at the time the guard was constructed. + optional original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via reset_device. + optional current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineOptionalDeviceGuard guard_{}; +}; + +// Note [Whither the DeviceGuard boilerplate] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Design note: in principle, we could avoid these wrappers using: +// +// using DeviceGuard = impl::InlineDeviceGuard; +// using OptionalDeviceGuard = +// impl::InlineOptionalDeviceGuard; +// +// But the error messages are worse, and our users can't just look at the +// header file to find out what's going on. 
Furthermore, for specializations +// like CUDAStreamGuard, it can be profitable to replace some interfaces with +// refined types (e.g., return CUDAStream instead of Stream). So, we eat +// the boilerplate and write out the API explicitly. + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h new file mode 100644 index 0000000000000000000000000000000000000000..f535fd3b7e60f2183157b76149f974899ac21a46 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h @@ -0,0 +1,117 @@ +#pragma once + +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. + +#include + +#include +#include + +namespace c10 { + +// These contains all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. +// This is most backends except PrivateUse2 and PrivateUse3 +#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(Meta, extra) \ + _(MTIA, extra) \ + _(PrivateUse1, extra) + +enum class DeviceType : int8_t { + CPU = 0, + CUDA = 1, // CUDA. + MKLDNN = 2, // Reserved for explicit MKLDNN + OPENGL = 3, // OpenGL + OPENCL = 4, // OpenCL + IDEEP = 5, // IDEEP. + HIP = 6, // AMD HIP + FPGA = 7, // FPGA + ORT = 8, // ONNX Runtime / Microsoft + XLA = 9, // XLA / TPU + Vulkan = 10, // Vulkan + Metal = 11, // Metal + XPU = 12, // XPU + MPS = 13, // MPS + Meta = 14, // Meta (tensors with no data) + HPU = 15, // HPU / HABANA + VE = 16, // SX-Aurora / NEC + Lazy = 17, // Lazy Tensors + IPU = 18, // Graphcore IPU + MTIA = 19, // Meta training and inference devices + PrivateUse1 = 20, // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES = 21, +}; + +constexpr DeviceType kCPU = DeviceType::CPU; +constexpr DeviceType kCUDA = DeviceType::CUDA; +constexpr DeviceType kHIP = DeviceType::HIP; +constexpr DeviceType kFPGA = DeviceType::FPGA; +constexpr DeviceType kORT = DeviceType::ORT; +constexpr DeviceType kXLA = DeviceType::XLA; +constexpr DeviceType kMPS = DeviceType::MPS; +constexpr DeviceType kMeta = DeviceType::Meta; +constexpr DeviceType kVulkan = DeviceType::Vulkan; +constexpr DeviceType kMetal = DeviceType::Metal; +constexpr DeviceType kXPU = DeviceType::XPU; +constexpr DeviceType kHPU = DeviceType::HPU; +constexpr DeviceType kVE = DeviceType::VE; +constexpr DeviceType kLazy = DeviceType::Lazy; +constexpr DeviceType kIPU = DeviceType::IPU; +constexpr DeviceType kMTIA = DeviceType::MTIA; +constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1; + +// define explicit int constant +constexpr int COMPILE_TIME_MAX_DEVICE_TYPES = + static_cast(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES); + +static_assert( + COMPILE_TIME_MAX_DEVICE_TYPES <= 21, + "Hey! You seem to be adding a lot of new DeviceTypes. 
The intent was " + "for this constant to reflect the actual number of DeviceTypes we support " + "in PyTorch; it's important that this number is not too large as we " + "use this to allocate stack arrays in some places in our code. If you " + "are indeed just adding the 20th device type, feel free to change " + "the check to 32; but if you are adding some sort of extensible device " + "types registration, please be aware that you are affecting code that " + "this number is small. Try auditing uses of this constant."); + +C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false); + +C10_API bool isValidDeviceType(DeviceType d); + +C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type); + +C10_API void register_privateuse1_backend(const std::string& backend_name); +C10_API std::string get_privateuse1_backend(bool lower_case = true); + +} // namespace c10 + +namespace std { +template <> +struct hash { + std::size_t operator()(c10::DeviceType k) const { + return std::hash()(static_cast(k)); + } +}; +} // namespace std + +namespace torch { +using c10::DeviceType; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Event.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Event.h new file mode 100644 index 0000000000000000000000000000000000000000..7b43f021b670853382373a6f938d9829a44ea9fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Event.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include + +namespace c10 { + +/** + * A backend-generic movable, not copyable, not thread-safe event. + * + * The design of this event follows that of CUDA and HIP events. These events + * are recorded and waited on by streams and can be rerecorded to, + * each rerecording essentially creating a new version of the event. + * For example, if (in CPU time), stream X is asked to record E, + * stream Y waits on E, and stream X is asked to record E again, then Y will + * wait for X to finish the first call to record and not the second, because + * it's waiting on the first version of event E, not the second. + * Querying an event only returns the status of its most recent version. + * + * Backend-generic events are implemented by this class and + * impl::InlineEvent. In addition to these events there are also + * some backend-specific events, like ATen's CUDAEvent. Each of these + * classes has its own use. + * + * impl::InlineEvent<...> or a backend-specific event should be + * preferred when the backend is known at compile time and known to + * be compiled. Backend-specific events may have additional functionality. + * + * This Event should be used if a particular backend may not be available, + * or the backend required is not known at compile time. + * + * These generic events are built on top of DeviceGuardImpls, analogous + * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls," + * is no longer entirely accurate, as these classes implement the + * backend-specific logic for a generic backend interface. + * + * See DeviceGuardImplInterface.h for a list of all supported flags. 
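+ *
+ * Illustrative usage sketch (editor's addition, not upstream documentation;
+ * `producer` and `consumer` stand for hypothetical c10::Stream values):
+ *
+ *   Event e(DeviceType::CUDA);
+ *   e.record(producer);     // enqueue a new version of the event on `producer`
+ *   e.block(consumer);      // `consumer` waits for that recorded version
+ *   bool done = e.query();  // true once that version is marked as recorded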
+ */ + +struct Event final { + // Constructors + Event() = delete; + Event( + const DeviceType _device_type, + const EventFlag _flag = EventFlag::PYTORCH_DEFAULT) + : impl_{_device_type, _flag} {} + + // Copy constructor and copy assignment operator (deleted) + Event(const Event&) = delete; + Event& operator=(const Event&) = delete; + + // Move constructor and move assignment operator + Event(Event&&) noexcept = default; + Event& operator=(Event&&) noexcept = default; + + // Destructor + ~Event() = default; + + // Getters + Device device() const noexcept { + return Device(device_type(), device_index()); + } + DeviceType device_type() const noexcept { + return impl_.device_type(); + } + DeviceIndex device_index() const noexcept { + return impl_.device_index(); + } + EventFlag flag() const noexcept { + return impl_.flag(); + } + bool was_marked_for_recording() const noexcept { + return impl_.was_marked_for_recording(); + } + + /** + * Calls record() if and only if record() has never been called for this + * event. Note: because Event is not thread-safe recordOnce() may call + * record() multiple times if called from multiple threads. + */ + void recordOnce(const Stream& stream) { + impl_.recordOnce(stream); + } + + /** + * Increments the event's version and enqueues a job with this version + * in the stream's work queue. When the stream process that job + * it notifies all streams waiting on / blocked by that version of the + * event to continue and marks that version as recorded. + * */ + void record(const Stream& stream) { + impl_.record(stream); + } + + /** + * Does nothing if the event has not been scheduled to be recorded. + * If the event was previously enqueued to be recorded, a command + * to wait for the version of the event that exists at the time of this call + * is inserted in the stream's work queue. + * When the stream reaches this command it will stop processing + * additional commands until that version of the event is marked as recorded. + */ + void block(const Stream& stream) const { + impl_.block(stream); + } + + /** + * Returns true if (and only if) + * (1) the event has never been scheduled to be recorded + * (2) the current version is marked as recorded. + * Returns false otherwise. + */ + bool query() const { + return impl_.query(); + } + + private: + impl::InlineEvent impl_; +}; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h new file mode 100644 index 0000000000000000000000000000000000000000..23cb5112904878b96b48f7e28aa036e1a0340114 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +// A RAII, thread local (!) guard that enables or disables inference mode upon +// construction, and sets it back to the original value upon destruction. +struct C10_API InferenceMode { + // Note [Expected TLS state in InferenceMode]: + // InferenceMode: ADInplaceOrView not in + // raw_local_dispatch_key_set.included(), + // Autograd in raw_local_dispatch_key_set.excluded() + // GradMode is disabled. + // NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(), + // Autograd not in raw_local_dispatch_key_set.excluded() + // GradMode is enabled by default unless toggled manually + // through other APIs, e.g. NoGradGuard. 
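+  //
+  // Illustrative usage sketch (editor's addition, not upstream documentation):
+  //   {
+  //     c10::InferenceMode guard;  // grad / fw-grad modes disabled,
+  //                                // Autograd keys excluded for this thread
+  //     // ... run inference-only work here ...
+  //   }                            // previous TLS state restored by ~InferenceMode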
+ // + // Invariant: + // - ADInplaceOrView is never in the excluded set + // - Autograd is never in the included set + // - Setting InferenceMode will set GradMode accordingly, but not vice versa. + // + // 1. Why do we put ADInplaceOrView in included set outside InferenceMode? + // + // Inplace update to inference tensor outside InferenceMode is not + // allowed. See Note [Inplace update inference tensor] for more details. + // Without going through ADInplaceOrView kernel, we cannot throw error + // for `inference_tensor.add_(1)` case. + // + // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode? + // + // For example: + // torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true); + // torch::Tensor k = a + 2; + // { + // c10::InferenceMode guard(true); + // k.add_(2); + // } + // `k.add_(2)` still need to go through ADInplaceOrView kernel so that it's + // prepared for future autograd. + // + // 3. Why does setting InferenceMode also set GradMode? + // + // This is required since InferenceMode is a faster and more restrictive + // version of NoGradGuard. All runtime checks using GradMode::is_enabled() + // are applicable to InferenceMode as well, e.g. + // `tensorTypeInCurrentExecutionContext` in interpreter.cpp. + InferenceMode(bool enabled = true) + : prev_mode(AutogradState::get_tls_state()), + prev_keyset(c10::impl::tls_local_dispatch_key_set()) { + // Enabling inference mode means disabling grad modes + // And disabling inference mode means enabling grad modes + AutogradState::set_tls_state(AutogradState( + /* grad_mode */ !enabled, + /* inference_mode */ enabled, + /* fw_grad_mode */ !enabled, + /* multithreading_enabled*/ !enabled)); + DispatchKeySet included = enabled + ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView) + : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView); + DispatchKeySet excluded = enabled + ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset) + : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset); + c10::impl::PODLocalDispatchKeySet cur_keyset{}; + cur_keyset.set_included(included); + cur_keyset.set_excluded(excluded); + c10::impl::_force_tls_local_dispatch_key_set(cur_keyset); + } + + ~InferenceMode() { + AutogradState::set_tls_state(prev_mode); + c10::impl::_force_tls_local_dispatch_key_set(prev_keyset); + } + static bool is_enabled(); + + private: + AutogradState prev_mode; + c10::impl::LocalDispatchKeySet prev_keyset; +}; +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h new file mode 100644 index 0000000000000000000000000000000000000000..351c038132a21767cdf4bbaffc263dbca949aa91 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include +#include + +#include + +namespace c10 { + +// A PyHandleCache represents a cached pointer from a C++ object to +// a Python object that represents that object analogously in Python. +// Upon a cache hit, the relevant object can be retrieved after a test +// and then a memory load. Two conditions must hold to be able to use this +// class: +// +// - This must truly be a cache; e.g., the caller must be able to produce +// the object some other way if the cache hit misses. +// +// - This must truly be a handle; e.g., the Python object referenced by +// this class must have static lifetime. 
This means we don't have to +// maintain strong ownership or deallocate the object when the C++ object +// dies. Static lifetime is a good idea in conjunction with the cache, +// since if you are producing a fresh object on miss you won't be +// maintaining object identity. If you need bidirectional ownership, +// you will want to factor out the pattern in TensorImpl with +// resurrection. +// +// This cache is expected to not improve perf under torchdeploy, as one +// interpreter will fill up the cache, and all the interpreters will be +// unable to use the slot. A potential improvement is to have multiple +// slots (one per interpreter), which will work in deployment scenarios +// where there a stable, fixed number of interpreters. You can also store +// the relevant state in the Python library, rather than in the non-Python +// library (although in many cases, this is not convenient, as there may +// not be a way to conveniently index based on the object.) +class PyHandleCache { + public: + PyHandleCache() : pyinterpreter_(nullptr), data_(nullptr) {} + + // Attempt to fetch the pointer from the cache, if the PyInterpreter + // matches. If it doesn't exist, or the cache entry is not valid, + // use slow_accessor to get the real pointer value and return that + // (possibly writing it to the cache, if the cache entry is + // available.) + template + PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor) + const { + // Note [Memory ordering on Python interpreter tag] + impl::PyInterpreter* interpreter = + pyinterpreter_.load(std::memory_order_acquire); + if (C10_LIKELY(interpreter == self_interpreter)) { + return data_; + } else if (interpreter == nullptr) { + auto* r = slow_accessor(); + impl::PyInterpreter* expected = nullptr; + // attempt to claim this cache entry with the specified interpreter tag + if (pyinterpreter_.compare_exchange_strong( + expected, self_interpreter, std::memory_order_acq_rel)) { + data_ = r; + } + // This shouldn't be possible, as you should be GIL protected + TORCH_INTERNAL_ASSERT(expected != self_interpreter); + return r; + } else { + return slow_accessor(); + } + } + + private: + mutable std::atomic pyinterpreter_; + mutable PyObject* data_; +}; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..71eb4b34ac9e11938eb45b86dca83cbe1a27acfa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +/** + * QEngine is an enum that is used to select the engine to run quantized ops. 
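+ *
+ * Editor's note (illustrative, based on the definitions below): toString()
+ * maps each value to its C++ spelling, e.g. toString(QEngine::FBGEMM) yields
+ * "FBGEMM". The lowercase strings accepted by the (assumed) Python-side
+ * setting `torch.backends.quantized.engine` (e.g. "fbgemm", "x86") correspond
+ * to these same enum values.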
+ * Keep this enum in sync with get_qengine_id() in + * torch/backends/quantized/__init__.py + */ +enum class QEngine : uint8_t { + NoQEngine = 0, + FBGEMM = 1, + QNNPACK = 2, + ONEDNN = 3, + X86 = 4, +}; + +constexpr auto kNoQEngine = QEngine::NoQEngine; +constexpr auto kFBGEMM = QEngine::FBGEMM; +constexpr auto kQNNPACK = QEngine::QNNPACK; +constexpr auto kONEDNN = QEngine::ONEDNN; +constexpr auto kX86 = QEngine::X86; + +inline std::string toString(QEngine qengine) { + switch (qengine) { + case kNoQEngine: + return "NoQEngine"; + case kFBGEMM: + return "FBGEMM"; + case kQNNPACK: + return "QNNPACK"; + case kONEDNN: + return "ONEDNN"; + case kX86: + return "X86"; + default: + TORCH_CHECK( + false, "Unrecognized Quantized Engine: ", static_cast(qengine)); + } +} + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h new file mode 100644 index 0000000000000000000000000000000000000000..4f6e59219031df3127d2af700edc24e6ba5b2f15 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include + +#include +#include + +namespace c10 { + +// A RefcountedDeleterContext object is used as the `ctx` argument for DataPtr +// to implement a shared DataPtr. Normally, a DataPtr is unique, but we use +// this custom context and the `refcounted_deleter` function below to make the +// DataPtr act like a non-unique DataPtr. This context object holds onto an +// inner context and deleter function which handle the actual deletion of the +// data when the refcount reaches 0. +// +// This shared DataPtr feature is only used when storages are shared between +// multiple Python interpreters in MultiPy. Before storages had PyObject +// preservation, interpreters could just share the same StorageImpl instance. +// But now a StorageImpl can only be associated with one interpreter in order +// to properly manage a zombie PyObject. So we share storages across Python +// interpreters by creating a different StorageImpl instance for each one, but +// they all point to the same data. +struct C10_API RefcountedDeleterContext { + RefcountedDeleterContext(void* other_ctx, c10::DeleterFnPtr other_deleter) + : other_ctx(other_ctx, other_deleter), refcount(1) {} + + std::unique_ptr other_ctx; + std::atomic_int refcount; +}; + +// `refcounted_deleter` is used as the `ctx_deleter` for DataPtr to implement +// a shared DataPtr. +// +// Warning: This should only be called on a pointer to +// a RefcountedDeleterContext that was allocated on the heap with `new`, +// because when the refcount reaches 0, the context is deleted with `delete` +C10_API void refcounted_deleter(void* ctx_); + +// If the storage's DataPtr does not use `refcounted_deleter`, replace it with +// a DataPtr that does, so it can be shared between multiple StorageImpls +C10_API void maybeApplyRefcountedDeleter(const c10::Storage& storage); + +// Create a new StorageImpl that points to the same data. 
If the original +// StorageImpl's DataPtr does not use `refcounted_deleter`, it will be replaced +// with one that does +C10_API c10::Storage newStorageImplFromRefcountedDataPtr( + const c10::Storage& storage); + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h new file mode 100644 index 0000000000000000000000000000000000000000..d07c9c4fcadb211f4eebf7f3893b030ea9f2548b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +// This is an safe owning holder for a PyObject, akin to pybind11's +// py::object, with two major differences: +// +// - It is in c10/core; i.e., you can use this type in contexts where +// you do not have a libpython dependency +// +// - It is multi-interpreter safe (ala torchdeploy); when you fetch +// the underlying PyObject* you are required to specify what the current +// interpreter context is and we will check that you match it. +// +// It is INVALID to store a reference to a Tensor object in this way; +// you should just use TensorImpl directly in that case! +struct C10_API SafePyObject { + // Steals a reference to data + SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : data_(data), pyinterpreter_(pyinterpreter) {} + SafePyObject(SafePyObject&& other) noexcept + : data_(std::exchange(other.data_, nullptr)), + pyinterpreter_(other.pyinterpreter_) {} + + // In principle this could be copyable if we add an incref to PyInterpreter + // but for now it's easier to just disallow it. + SafePyObject(SafePyObject const&) = delete; + SafePyObject& operator=(SafePyObject const&) = delete; + + ~SafePyObject() { + if (data_ != nullptr) { + (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false); + } + } + + c10::impl::PyInterpreter& pyinterpreter() const { + return *pyinterpreter_; + } + PyObject* ptr(const c10::impl::PyInterpreter*) const; + + // stop tracking the current object, and return it + PyObject* release() { + auto rv = data_; + data_ = nullptr; + return rv; + } + + private: + PyObject* data_; + c10::impl::PyInterpreter* pyinterpreter_; +}; + +// Like SafePyObject, but non-owning. Good for references to global PyObjects +// that will be leaked on interpreter exit. You get a copy constructor/assign +// this way. 
+struct C10_API SafePyHandle { + SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {} + SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : data_(data), pyinterpreter_(pyinterpreter) {} + + c10::impl::PyInterpreter& pyinterpreter() const { + return *pyinterpreter_; + } + PyObject* ptr(const c10::impl::PyInterpreter*) const; + void reset() { + data_ = nullptr; + pyinterpreter_ = nullptr; + } + operator bool() { + return data_; + } + + private: + PyObject* data_; + c10::impl::PyInterpreter* pyinterpreter_; +}; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h new file mode 100644 index 0000000000000000000000000000000000000000..755e00c9da001274e1368bf9c7a5d0f3157e1124 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h @@ -0,0 +1,672 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { + +// For the macros below: +// NB: If you want to macro some code for all non-QInt scalar types (i.e. types +// with complete information, you probably want one of the +// AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND +// macros below, which are designed to behave similarly to the Dispatch macros +// with the same name. + +// NB: Order matters for this macro; it is relied upon in +// _promoteTypesLookup and the serialization format. +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \ + _(uint8_t, Byte) /* 0 */ \ + _(int8_t, Char) /* 1 */ \ + _(int16_t, Short) /* 2 */ \ + _(int, Int) /* 3 */ \ + _(int64_t, Long) /* 4 */ \ + _(at::Half, Half) /* 5 */ \ + _(float, Float) /* 6 */ \ + _(double, Double) /* 7 */ \ + _(c10::complex, ComplexHalf) /* 8 */ \ + _(c10::complex, ComplexFloat) /* 9 */ \ + _(c10::complex, ComplexDouble) /* 10 */ \ + _(bool, Bool) /* 11 */ \ + _(c10::qint8, QInt8) /* 12 */ \ + _(c10::quint8, QUInt8) /* 13 */ \ + _(c10::qint32, QInt32) /* 14 */ \ + _(at::BFloat16, BFloat16) /* 15 */ \ + _(c10::quint4x2, QUInt4x2) /* 16 */ \ + _(c10::quint2x4, QUInt2x4) /* 17 */ \ + _(c10::bits1x8, Bits1x8) /* 18 */ \ + _(c10::bits2x4, Bits2x4) /* 19 */ \ + _(c10::bits4x2, Bits4x2) /* 20 */ \ + _(c10::bits8, Bits8) /* 21 */ \ + _(c10::bits16, Bits16) /* 22 */ \ + _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ \ + _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ \ + _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ \ + _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */ + +// If you want to support ComplexHalf for real, add ComplexHalf +// into this macro (and change the name). But beware: convert() +// doesn't work for all the conversions you need... 
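+// Editor's sketch of how these X-macros are consumed (illustrative only, not
+// upstream code; PRINT_SIZE is a made-up callback and the snippet assumes it
+// is expanded inside a function with <iostream> available):
+//
+//   #define PRINT_SIZE(cpp_type, name) \
+//     std::cout << #name << ": " << sizeof(cpp_type) << " bytes\n";
+//   AT_FORALL_SCALAR_TYPES(PRINT_SIZE)  // one statement per (type, name) pair
+//   #undef PRINT_SIZE
+//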
+#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(at::Half, Half) \ + _(float, Float) \ + _(double, Double) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) \ + _(bool, Bool) \ + _(at::BFloat16, BFloat16) \ + _(at::Float8_e5m2, Float8_e5m2) \ + _(at::Float8_e4m3fn, Float8_e4m3fn) + +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(at::Half, Half) \ + _(float, Float) \ + _(double, Double) \ + _(c10::complex, ComplexHalf) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) \ + _(bool, Bool) \ + _(at::BFloat16, BFloat16) \ + _(at::Float8_e5m2, Float8_e5m2) \ + _(at::Float8_e4m3fn, Float8_e4m3fn) \ + _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) \ + _(at::Float8_e4m3fnuz, Float8_e4m3fnuz) + +enum class ScalarType : int8_t { +#define DEFINE_ST_ENUM_VAL_(_1, n) n, + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ST_ENUM_VAL_) +#undef DEFINE_ENUM_ST_ENUM_VAL_ + Undefined, + NumOptions +}; + +constexpr uint16_t NumScalarTypes = + static_cast(ScalarType::NumOptions); + +namespace impl { + +// These are used to map ScalarTypes to C++ types. + +template +struct ScalarTypeToCPPType; + +#define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \ + template <> \ + struct ScalarTypeToCPPType { \ + using type = cpp_type; \ + \ + /* This is a workaround for the CUDA bug which prevents */ \ + /* ::detail::ScalarTypeToCType::type being used directly due to */ \ + /* ambiguous reference which can't to be resolved. For some reason it */ \ + /* can't pick between at::detail and at::cuda::detail. */ \ + /* For repro example, please see: */ \ + /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \ + /* TODO: remove once the bug is fixed. 
*/ \ + static type t; \ + }; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType) + +#undef SPECIALIZE_ScalarTypeToCPPType + +template +using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType::type; + +} // namespace impl + +template +struct CppTypeToScalarType; + +#define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \ + template <> \ + struct CppTypeToScalarType \ + : std:: \ + integral_constant { \ + }; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType) + +#undef SPECIALIZE_CppTypeToScalarType + +#define AT_FORALL_INT_TYPES(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) + +#define AT_FORALL_SCALAR_TYPES(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) + +#define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE>::t), \ + SCALARTYPE) + +#define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) + +#define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) + +#define AT_FORALL_SCALAR_TYPES_AND4( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE4>::t), \ + SCALARTYPE4) + +#define AT_FORALL_SCALAR_TYPES_AND5( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE4>::t), \ + SCALARTYPE4) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + 
::c10::ScalarType::SCALARTYPE5>::t), \ + SCALARTYPE5) + +#define AT_FORALL_SCALAR_TYPES_AND6( \ + SCALARTYPE1, \ + SCALARTYPE2, \ + SCALARTYPE3, \ + SCALARTYPE4, \ + SCALARTYPE5, \ + SCALARTYPE6, \ + _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE4>::t), \ + SCALARTYPE4) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE5>::t), \ + SCALARTYPE5) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE6>::t), \ + SCALARTYPE6) + +#define AT_FORALL_SCALAR_TYPES_AND7( \ + SCALARTYPE1, \ + SCALARTYPE2, \ + SCALARTYPE3, \ + SCALARTYPE4, \ + SCALARTYPE5, \ + SCALARTYPE6, \ + SCALARTYPE7, \ + _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE4>::t), \ + SCALARTYPE4) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE5>::t), \ + SCALARTYPE5) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE6>::t), \ + SCALARTYPE6) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE7>::t), \ + SCALARTYPE7) + +#define AT_FORALL_QINT_TYPES(_) \ + _(c10::qint8, QInt8) \ + _(c10::quint8, QUInt8) \ + _(c10::qint32, QInt32) \ + _(c10::quint4x2, QUInt4x2) \ + _(c10::quint2x4, QUInt2x4) + +#define AT_FORALL_COMPLEX_TYPES(_) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) + +#define DEFINE_CONSTANT(_, name) \ + constexpr ScalarType k##name = ScalarType::name; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT) +#undef DEFINE_CONSTANT + +static inline const char* toString(ScalarType t) { +#define DEFINE_CASE(_, name) \ + case ScalarType::name: \ + return #name; + + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE) + default: + return "UNKNOWN_SCALAR"; + } +#undef DEFINE_CASE +} + +static inline size_t elementSize(ScalarType t) { +#define CASE_ELEMENTSIZE_CASE(ctype, name) \ + case ScalarType::name: \ + return sizeof(ctype); + + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE) + default: + TORCH_CHECK(false, "Unknown ScalarType"); + } +#undef CASE_ELEMENTSIZE_CASE +} + +static inline bool isIntegralType(ScalarType t, bool includeBool) { + bool isIntegral = + (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int || + t == ScalarType::Long || t == ScalarType::Short); + + return isIntegral || (includeBool && t == ScalarType::Bool); +} + +C10_DEPRECATED_MESSAGE( + "isIntegralType is deprecated. 
Please use the overload with 'includeBool' parameter instead.") +static inline bool isIntegralType(ScalarType t) { + return isIntegralType(t, /*includeBool=*/false); +} + +static inline bool isFloat8Type(ScalarType t) { + return t == ScalarType::Float8_e5m2 || t == ScalarType::Float8_e5m2fnuz || + t == ScalarType::Float8_e4m3fn || t == ScalarType::Float8_e4m3fnuz; +} + +static inline bool isReducedFloatingType(ScalarType t) { + return t == ScalarType::Half || t == ScalarType::BFloat16 || isFloat8Type(t); +} + +static inline bool isFloatingType(ScalarType t) { + return t == ScalarType::Double || t == ScalarType::Float || + isReducedFloatingType(t); +} + +static inline bool isComplexType(ScalarType t) { + return ( + t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat || + t == ScalarType::ComplexDouble); +} + +static inline bool isQIntType(ScalarType t) { + // Don't forget to extend this when adding new QInt types + return t == ScalarType::QInt8 || t == ScalarType::QUInt8 || + t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 || + t == ScalarType::QUInt2x4; +} + +static inline bool isBitsType(ScalarType t) { + return t == ScalarType::Bits1x8 || t == ScalarType::Bits2x4 || + t == ScalarType::Bits4x2 || t == ScalarType::Bits8 || + t == ScalarType::Bits16; +} + +static inline ScalarType toQIntType(ScalarType t) { + switch (t) { + case ScalarType::Byte: + return ScalarType::QUInt8; + case ScalarType::Char: + return ScalarType::QInt8; + case ScalarType::Int: + return ScalarType::QInt32; + default: + return t; + } +} + +static inline ScalarType toUnderlying(ScalarType t) { + switch (t) { + case ScalarType::QUInt8: + return ScalarType::Byte; + case ScalarType::QInt8: + return ScalarType::Char; + case ScalarType::QInt32: + return ScalarType::Int; + case ScalarType::QUInt4x2: + return ScalarType::Byte; + case ScalarType::QUInt2x4: + return ScalarType::Byte; + default: + return t; + } +} + +static inline bool isSignedType(ScalarType t) { + TORCH_CHECK(!isQIntType(t), "isSignedType not supported for quantized types"); +#define CASE_SIGNED(ctype, name) \ + case ScalarType::name: \ + return std::numeric_limits::is_signed; + + switch (t) { + case ScalarType::Bits1x8: + case ScalarType::Bits2x4: + case ScalarType::Bits4x2: + case ScalarType::Bits8: + case ScalarType::Bits16: + TORCH_CHECK(false, "Bits types are undefined"); + case ScalarType::ComplexHalf: + case ScalarType::ComplexFloat: + case ScalarType::ComplexDouble: + return true; + AT_FORALL_SCALAR_TYPES_AND5( + Half, Bool, BFloat16, Float8_e5m2, Float8_e4m3fn, CASE_SIGNED) + default: + TORCH_CHECK(false, "Unknown ScalarType"); + } +#undef CASE_SIGNED +} + +static inline bool isUnderlying(ScalarType type, ScalarType qtype) { + return type == toUnderlying(qtype); +} + +static inline ScalarType toRealValueType(ScalarType t) { + switch (t) { + case ScalarType::ComplexHalf: + return ScalarType::Half; + case ScalarType::ComplexFloat: + return ScalarType::Float; + case ScalarType::ComplexDouble: + return ScalarType::Double; + default: + return t; + } +} + +static inline ScalarType toComplexType(ScalarType t) { + switch (t) { + case ScalarType::BFloat16: + // BFloat16 has range equivalent to Float, + // so we map it to ComplexFloat. 
+ return ScalarType::ComplexFloat; + case ScalarType::Half: + return ScalarType::ComplexHalf; + case ScalarType::Float: + return ScalarType::ComplexFloat; + case ScalarType::Double: + return ScalarType::ComplexDouble; + case ScalarType::ComplexHalf: + return ScalarType::ComplexHalf; + case ScalarType::ComplexFloat: + return ScalarType::ComplexFloat; + case ScalarType::ComplexDouble: + return ScalarType::ComplexDouble; + default: + TORCH_CHECK(false, "Unknown Complex ScalarType for ", t); + } +} + +// see tensor_attributes.rst for detailed explanation and examples +// of casting rules. +static inline bool canCast(const ScalarType from, const ScalarType to) { + // We disallow complex -> non complex, e.g., float_tensor *= complex is + // disallowed. + if (isComplexType(from) && !isComplexType(to)) { + return false; + } + // We disallow float -> integral, e.g., int_tensor *= float is disallowed. + if (isFloatingType(from) && isIntegralType(to, false)) { + return false; + } + + // Treat bool as a distinct "category," to be consistent with type promotion + // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same + // category as `bool_tensor`, we would not promote. Differing categories + // implies `bool_tensor += 5` is disallowed. + // + // NB: numpy distinguishes "unsigned" as a category to get the desired + // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because: + // * We don't want the performance hit of checking the runtime sign of + // Scalars. + // * `uint8_tensor + 5 -> int64_tensor` would be undesirable. + if (from != ScalarType::Bool && to == ScalarType::Bool) { + return false; + } + return true; +} + +static inline ScalarType promoteTypes(ScalarType a, ScalarType b) { + // This is generated according to NumPy's promote_types + constexpr auto u1 = ScalarType::Byte; + constexpr auto i1 = ScalarType::Char; + constexpr auto i2 = ScalarType::Short; + constexpr auto i4 = ScalarType::Int; + constexpr auto i8 = ScalarType::Long; + constexpr auto f2 = ScalarType::Half; + constexpr auto f4 = ScalarType::Float; + constexpr auto f8 = ScalarType::Double; + constexpr auto c2 = ScalarType::ComplexHalf; + constexpr auto c4 = ScalarType::ComplexFloat; + constexpr auto c8 = ScalarType::ComplexDouble; + constexpr auto b1 = ScalarType::Bool; + constexpr auto bf = ScalarType::BFloat16; + constexpr auto ud = ScalarType::Undefined; + if (a == ud || b == ud) { + return ScalarType::Undefined; + } + + // If the two types are equal, return that type + if (a == b) { + return a; + } + + // Handle identically equal types + if (isQIntType(a) || isQIntType(b)) { + TORCH_CHECK( + false, + "promoteTypes with quantized numbers is not handled yet; figure out what the correct rules should be, offending types: ", + toString(a), + " ", + toString(b)); + } + + if (isBitsType(a) || isBitsType(b)) { + return ScalarType::Undefined; + } + + if (isFloat8Type(a) || isFloat8Type(b)) { + TORCH_CHECK( + false, + "Promotion for Float8 Types is not supported, attempted to promote ", + toString(a), + " and ", + toString(b)); + } + + // Bits, Quantized and Float8 are 14 dtypes already handled and not included + // in the promotion table below. 
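+  //
+  // Worked example (editor's note): BFloat16 is entry 15 of the enum and the
+  // three qint types (QInt8 = 12, QUInt8 = 13, QInt32 = 14) are the only
+  // skipped entries below it, so subtracting num_qint_types (3) maps BFloat16
+  // to index 12 -- the last row/column of the 13 x 13 table below. Entries
+  // 0..11 (Byte through Bool) sit below QInt8 and need no shift.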
+ static constexpr int num_bits_types = static_cast(ScalarType::Bits16) - + static_cast(ScalarType::Bits1x8) + 1; + + static constexpr int num_float8_types = + static_cast(ScalarType::Float8_e4m3fnuz) - + static_cast(ScalarType::Float8_e5m2) + 1; + + static constexpr int num_qint_types = static_cast(ScalarType::QInt32) - + static_cast(ScalarType::QInt8) + 1; + + static constexpr int num_quint_types = + static_cast(ScalarType::QUInt2x4) - + static_cast(ScalarType::QUInt4x2) + 1; + + static constexpr int num_quantized_types = num_qint_types + num_quint_types; + + static constexpr int num_missing_types = + num_bits_types + num_float8_types + num_quantized_types; + + // Bfloat16 is at position 15 in the ScalerType enum, There are three types + // below bf16 not included in the table, Qint8, QUInt8, QInt32. Every other + // type above bf16, i.e. {Bits, Quantized, Float8} are not included in the + // table. + + // If either of the types is bf16, we need to shift the type down by the one + // missing section in the table that is less then bf16 i.e {QInt8, QUInt8, + // QInt32} + a = a == bf ? static_cast(static_cast(a) - num_qint_types) + : a; + b = b == bf ? static_cast(static_cast(b) - num_qint_types) + : b; + + // We decrease the promotion table by the number of missing types -> 14 + // and then subtract 1 more from the table since we don't store ud to ud + // mapping. + static constexpr int NUM_PROMOTE_TYPES = + static_cast(ScalarType::NumOptions) - num_missing_types - 1; + + // this matrix has to be consistent with + // AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS undefined is used where we + // are not sure about the correct value for type promotion. + // clang-format off + static constexpr std:: + array, NUM_PROMOTE_TYPES> + _promoteTypesLookup = {{ + /* u1 i1 i2 i4 i8 f2 f4 f8 c2 c4 c8 b1 bf*/ + /* u1 */ {u1, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, u1, bf}, + /* i1 */ {i2, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, i1, bf}, + /* i2 */ {i2, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, i2, bf}, + /* i4 */ {i4, i4, i4, i4, i8, f2, f4, f8, c2, c4, c8, i4, bf}, + /* i8 */ {i8, i8, i8, i8, i8, f2, f4, f8, c2, c4, c8, i8, bf}, + /* f2 */ {f2, f2, f2, f2, f2, f2, f4, f8, c2, c4, c8, f2, f4}, + /* f4 */ {f4, f4, f4, f4, f4, f4, f4, f8, c4, c4, c8, f4, f4}, + /* f8 */ {f8, f8, f8, f8, f8, f8, f8, f8, c8, c8, c8, f8, f8}, + /* c2 */ {c2, c2, c2, c2, c2, c2, c4, c8, c2, c4, c8, c2, c4}, + /* c4 */ {c4, c4, c4, c4, c4, c4, c4, c8, c4, c4, c8, c4, c4}, + /* c8 */ {c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8}, + /* b1 */ {u1, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, b1, bf}, + /* bf */ {bf, bf, bf, bf, bf, f4, f4, f8, c4, c4, c8, bf, bf}, + }}; + // clang-format on + return _promoteTypesLookup[static_cast(a)][static_cast(b)]; +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::ScalarType scalar_type) { + return stream << toString(scalar_type); +} + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..5c64e953b2ecc9f867fe3ee617de48d75cd9337f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h @@ -0,0 +1,165 @@ +#pragma once + +#include + +namespace c10 { + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. 
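+ *
+ * A minimal usage sketch (illustrative; run_async and work_stream are
+ * placeholders, not part of this header):
+ *
+ *   void run_async(c10::Stream work_stream) {
+ *     c10::StreamGuard guard(work_stream); // switch device + current stream
+ *     // ... enqueue work on work_stream ...
+ *   } // guard restores the original device and stream here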
+ * + * Use of StreamGuard is HIGHLY discouraged in operator definitions. In + * a single operator, you probably don't know enough about the global + * state of the world to profitably decide how to set streams. Let + * the caller handle this appropriately, and just use the current stream + * in your operator code. + * + * This StreamGuard does NOT have an uninitialized state; it is guaranteed + * to reset the stream and device on exit. If you are in a situation + * where you *might* want to setup a stream guard, see OptionalStreamGuard. + */ +struct StreamGuard { + /// No default constructor, see Note [Omitted default constructor from RAII] + explicit StreamGuard() = delete; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit StreamGuard(Stream stream) : guard_(stream) {} + + /// Copy is disallowed + StreamGuard(const StreamGuard&) = delete; + StreamGuard& operator=(const StreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + StreamGuard(StreamGuard&& other) = delete; + StreamGuard& operator=(StreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// + /// NOTE: this implementation may skip some stream/device setting if + /// it can prove that it is unnecessary. + /// + /// WARNING: reset_stream does NOT preserve previously set streams on + /// different devices. If you need to set streams on multiple devices + /// on , use MultiStreamGuard instead. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the stream that was set at the time the guard was constructed. + Stream original_stream() const { + return guard_.original_stream(); + } + + /// Returns the most recent stream that was set using this device guard, + /// either from construction, or via set_stream. + Stream current_stream() const { + return guard_.current_stream(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return guard_.current_device(); + } + + /// Returns the device that was set at the most recent reset_stream(), + /// or otherwise the device at construction time. + Device original_device() const { + return guard_.original_device(); + } + + private: + c10::impl::InlineStreamGuard guard_; +}; + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See OptionalDeviceGuard for more guidance on how to use this class. + */ +struct OptionalStreamGuard { + /// Create an uninitialized guard. + explicit OptionalStreamGuard() = default; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit OptionalStreamGuard(Stream stream) : guard_(stream) {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream, + /// if the passed stream is not nullopt. 
+ explicit OptionalStreamGuard(optional stream_opt) + : guard_(stream_opt) {} + + /// Copy is disallowed + OptionalStreamGuard(const OptionalStreamGuard&) = delete; + OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + OptionalStreamGuard(OptionalStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Initializes the guard if it was not previously initialized. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the stream that was set at the time the guard was most recently + /// initialized, or nullopt if the guard is uninitialized. + optional original_stream() const { + return guard_.original_stream(); + } + + /// Returns the most recent stream that was set using this stream guard, + /// either from construction, or via reset_stream, if the guard is + /// initialized, or nullopt if the guard is uninitialized. + optional current_stream() const { + return guard_.current_stream(); + } + + /// Restore the original device and stream, resetting this guard to + /// uninitialized state. + void reset() { + guard_.reset(); + } + + private: + c10::impl::InlineOptionalStreamGuard guard_{}; +}; + +/** + * A MultiStreamGuard is an RAII class that sets the current streams of a set of + * devices all at once, and resets them to their original values on destruction. + */ +struct MultiStreamGuard { + /// Set the current streams to the passed streams on each of their respective + /// devices. 
+ explicit MultiStreamGuard(ArrayRef streams) : guard_(streams) {} + + /// Copy is disallowed + MultiStreamGuard(const MultiStreamGuard&) = delete; + MultiStreamGuard& operator=(const MultiStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + MultiStreamGuard(MultiStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete; + + private: + c10::impl::InlineMultiStreamGuard guard_; +}; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h new file mode 100644 index 0000000000000000000000000000000000000000..73eb1b9880ec48dc93d0588b9eac075ec4dc41b8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +class C10_API SymBool { + public: + /*implicit*/ SymBool(bool b) : data_(b){}; + SymBool(SymNode ptr) : data_(false), ptr_(std::move(ptr)) { + TORCH_CHECK(ptr_->is_bool()); + }; + SymBool() : data_(false) {} + + SymNodeImpl* toSymNodeImplUnowned() const { + return ptr_.get(); + } + + SymNodeImpl* release() && { + return std::move(ptr_).release(); + } + + // Only valid if is_heap_allocated() + SymNode toSymNodeImpl() const; + + // Guaranteed to return a SymNode, wrapping using base if necessary + SymNode wrap_node(const SymNode& base) const; + + bool expect_bool() const { + c10::optional c = maybe_as_bool(); + TORCH_CHECK(c.has_value()); + return *c; + } + + SymBool sym_and(const SymBool&) const; + SymBool sym_or(const SymBool&) const; + SymBool sym_not() const; + + SymBool operator&(const SymBool& other) const { + return sym_and(other); + } + SymBool operator|(const SymBool& other) const { + return sym_or(other); + } + SymBool operator~() const { + return sym_not(); + } + + // Insert a guard for the bool to be its concrete value, and then return + // that value. Note that C++ comparison operations default to returning + // bool, so it's not so common to have to call this + bool guard_bool(const char* file, int64_t line) const; + bool expect_true(const char* file, int64_t line) const; + + bool has_hint() const; + + bool as_bool_unchecked() const { + return data_; + } + + c10::optional maybe_as_bool() const { + if (!is_heap_allocated()) { + return c10::make_optional(data_); + } + return toSymNodeImplUnowned()->constant_bool(); + } + + bool is_heap_allocated() const { + return ptr_; + } + + private: + // TODO: optimize to union + bool data_; + SymNode ptr_; +}; + +C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s); + +#define TORCH_SYM_CHECK(cond, ...) \ + TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) +#define TORCH_SYM_INTERNAL_ASSERT(cond, ...) 
\ + TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..3b27d6ae6a6ccff606bb00fdc6e3a180e6610686 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +class SymNodeImpl; +using SymNode = c10::intrusive_ptr; + +// When you add a method, you also need to edit +// torch/csrc/jit/python/init.cpp +// torch/csrc/utils/python_symnode.h +// c10/core/ConstantSymNodeImpl.h +class C10_API SymNodeImpl : public c10::intrusive_ptr_target { + public: + ~SymNodeImpl() override = default; + + template + c10::intrusive_ptr dyn_cast() const { + return c10::intrusive_ptr::reclaim_copy(dynamic_cast(this)); + } + + // these could be pure virtual when we implement LTC versions + virtual bool is_int() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_bool() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_float() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode add(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sub(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode mul(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode truediv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode pow(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode floordiv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode mod(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode eq(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ne(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode gt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode lt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode le(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ge(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ceil() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode floor() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode neg() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_min(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_max(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_or(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_and(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_not() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) { + TORCH_CHECK(false, "NYI"); + }; + // NB: self is ignored here, only the arguments are used + virtual SymNode is_contiguous( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_contiguous_2d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_contiguous_3d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_strides_2d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + 
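+  // How callers typically exercise these hooks through SymNode handles
+  // (illustrative; a and b are placeholder SymNode values):
+  //   SymNode c = a->add(b);                        // build a symbolic expr
+  //   bool hinted = c->has_hint();                  // concrete hint known?
+  //   int64_t v = c->guard_int(__FILE__, __LINE__); // guard and concretize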
virtual SymNode is_channels_last_strides_3d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_non_overlapping_and_dense( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode clone() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_float() { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode wrap_int(int64_t num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_float(double num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_bool(bool num) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t guard_int(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool guard_bool(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual double guard_float(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool expect_true(const char* file, int64_t line) { + // No improvement for unbacked SymBools by default, replace this + // with a better implementation! + return guard_bool(file, line); + }; + virtual bool expect_size(const char* file, int64_t line) { + // No improvement for unbacked SymInts by default, replace this + // with a better implementation! + return ge(wrap_int(0))->guard_bool(file, line); + }; + virtual int64_t int_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool bool_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool has_hint() { + TORCH_CHECK(false, "NYI"); + }; + virtual std::string str() { + TORCH_CHECK(false, "NYI"); + }; + virtual c10::optional singleton_int() { + return c10::nullopt; + } + virtual c10::optional singleton_coeff() { + return c10::nullopt; + } + virtual c10::optional constant_int() { + return c10::nullopt; + } + virtual c10::optional constant_bool() { + return c10::nullopt; + } + virtual c10::optional maybe_as_int() { + return c10::nullopt; + } + virtual bool is_constant() { + return false; + } + virtual bool is_symbolic() { + return true; + } + std::ostream& operator<<(std::ostream& os) { + os << str(); + return os; + } +}; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/alignment.h b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..2877decc04d7862098828b90d51885bda40279be --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/alignment.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace c10 { + +#ifdef C10_MOBILE +// Use 16-byte alignment on mobile +// - ARM NEON AArch32 and AArch64 +// - x86[-64] < AVX +constexpr size_t gAlignment = 16; +#else +// Use 64-byte alignment should be enough for computation up to AVX512. 
+constexpr size_t gAlignment = 64; +#endif + +constexpr size_t gPagesize = 4096; +// since the default thp pagesize is 2MB, enable thp only +// for buffers of size 2MB or larger to avoid memory bloating +constexpr size_t gAlloc_threshold_thp = 2 * 1024 * 1024; +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/clog.h b/env-llmeval/lib/python3.10/site-packages/torch/include/clog.h new file mode 100644 index 0000000000000000000000000000000000000000..414376116ddcb558f6a318ca295f55a396379f8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/clog.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +#define CLOG_NONE 0 +#define CLOG_FATAL 1 +#define CLOG_ERROR 2 +#define CLOG_WARNING 3 +#define CLOG_INFO 4 +#define CLOG_DEBUG 5 + +#ifndef CLOG_VISIBILITY + #if defined(__ELF__) + #define CLOG_VISIBILITY __attribute__((__visibility__("internal"))) + #elif defined(__MACH__) + #define CLOG_VISIBILITY __attribute__((__visibility__("hidden"))) + #else + #define CLOG_VISIBILITY + #endif +#endif + +#ifndef CLOG_ARGUMENTS_FORMAT + #if defined(__GNUC__) + #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2))) + #else + #define CLOG_ARGUMENTS_FORMAT + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args); + +#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_debug_function_name(const char* format, ...) { \ + if (level >= CLOG_DEBUG) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_debug(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_info_function_name(const char* format, ...) { \ + if (level >= CLOG_INFO) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_info(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_warning_function_name(const char* format, ...) { \ + if (level >= CLOG_WARNING) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_warning(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_error_function_name(const char* format, ...) { \ + if (level >= CLOG_ERROR) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_error(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_fatal_function_name(const char* format, ...) 
{ \ + if (level >= CLOG_FATAL) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_fatal(module, format, args); \ + va_end(args); \ + } \ + abort(); \ + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/cpuinfo.h b/env-llmeval/lib/python3.10/site-packages/torch/include/cpuinfo.h new file mode 100644 index 0000000000000000000000000000000000000000..1972309da14ce894fb89f8bf19f10592d4934106 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/cpuinfo.h @@ -0,0 +1,1939 @@ +#pragma once +#ifndef CPUINFO_H +#define CPUINFO_H + +#ifndef __cplusplus + #include +#endif + +#ifdef __APPLE__ + #include +#endif + +#include + +/* Identify architecture and define corresponding macro */ + +#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) + #define CPUINFO_ARCH_X86 1 +#endif + +#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) + #define CPUINFO_ARCH_X86_64 1 +#endif + +#if defined(__arm__) || defined(_M_ARM) + #define CPUINFO_ARCH_ARM 1 +#endif + +#if defined(__aarch64__) || defined(_M_ARM64) + #define CPUINFO_ARCH_ARM64 1 +#endif + +#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) + #define CPUINFO_ARCH_PPC64 1 +#endif + +#if defined(__asmjs__) + #define CPUINFO_ARCH_ASMJS 1 +#endif + +#if defined(__wasm__) + #if defined(__wasm_simd128__) + #define CPUINFO_ARCH_WASMSIMD 1 + #else + #define CPUINFO_ARCH_WASM 1 + #endif +#endif + +/* Define other architecture-specific macros as 0 */ + +#ifndef CPUINFO_ARCH_X86 + #define CPUINFO_ARCH_X86 0 +#endif + +#ifndef CPUINFO_ARCH_X86_64 + #define CPUINFO_ARCH_X86_64 0 +#endif + +#ifndef CPUINFO_ARCH_ARM + #define CPUINFO_ARCH_ARM 0 +#endif + +#ifndef CPUINFO_ARCH_ARM64 + #define CPUINFO_ARCH_ARM64 0 +#endif + +#ifndef CPUINFO_ARCH_PPC64 + #define CPUINFO_ARCH_PPC64 0 +#endif + +#ifndef CPUINFO_ARCH_ASMJS + #define CPUINFO_ARCH_ASMJS 0 +#endif + +#ifndef CPUINFO_ARCH_WASM + #define CPUINFO_ARCH_WASM 0 +#endif + +#ifndef CPUINFO_ARCH_WASMSIMD + #define CPUINFO_ARCH_WASMSIMD 0 +#endif + +#if CPUINFO_ARCH_X86 && defined(_MSC_VER) + #define CPUINFO_ABI __cdecl +#elif CPUINFO_ARCH_X86 && defined(__GNUC__) + #define CPUINFO_ABI __attribute__((__cdecl__)) +#else + #define CPUINFO_ABI +#endif + +#define CPUINFO_CACHE_UNIFIED 0x00000001 +#define CPUINFO_CACHE_INCLUSIVE 0x00000002 +#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004 + +struct cpuinfo_cache { + /** Cache size in bytes */ + uint32_t size; + /** Number of ways of associativity */ + uint32_t associativity; + /** Number of sets */ + uint32_t sets; + /** Number of partitions */ + uint32_t partitions; + /** Line size in bytes */ + uint32_t line_size; + /** + * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing). 
+ * + * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING + */ + uint32_t flags; + /** Index of the first logical processor that shares this cache */ + uint32_t processor_start; + /** Number of logical processors that share this cache */ + uint32_t processor_count; +}; + +struct cpuinfo_trace_cache { + uint32_t uops; + uint32_t associativity; +}; + +#define CPUINFO_PAGE_SIZE_4KB 0x1000 +#define CPUINFO_PAGE_SIZE_1MB 0x100000 +#define CPUINFO_PAGE_SIZE_2MB 0x200000 +#define CPUINFO_PAGE_SIZE_4MB 0x400000 +#define CPUINFO_PAGE_SIZE_16MB 0x1000000 +#define CPUINFO_PAGE_SIZE_1GB 0x40000000 + +struct cpuinfo_tlb { + uint32_t entries; + uint32_t associativity; + uint64_t pages; +}; + +/** Vendor of processor core design */ +enum cpuinfo_vendor { + /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */ + cpuinfo_vendor_unknown = 0, + + /* Active vendors of modern CPUs */ + + /** + * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures. + * + * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004. + */ + cpuinfo_vendor_intel = 1, + /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */ + cpuinfo_vendor_amd = 2, + /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_arm = 3, + /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_qualcomm = 4, + /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_apple = 5, + /** Samsung Electronics Co., Ltd. Vendir if ARM64 processor microarchitectures. */ + cpuinfo_vendor_samsung = 6, + /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */ + cpuinfo_vendor_nvidia = 7, + /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_mips = 8, + /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */ + cpuinfo_vendor_ibm = 9, + /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_ingenic = 10, + /** + * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures. + * + * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies. + */ + cpuinfo_vendor_via = 11, + /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_cavium = 12, + /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_broadcom = 13, + /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_apm = 14, + /** + * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures. + * + * Processors are designed by HiSilicon, a subsidiary of Huawei. + */ + cpuinfo_vendor_huawei = 15, + /** + * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor of x86-64 processor microarchitectures. + * + * Processors are variants of AMD cores. + */ + cpuinfo_vendor_hygon = 16, + + /* Active vendors of embedded CPUs */ + + /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_texas_instruments = 30, + /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_marvell = 31, + /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_rdc = 32, + /** DM&P Electronics Inc. 
Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_dmp = 33, + /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */ + cpuinfo_vendor_motorola = 34, + + /* Defunct CPU vendors */ + + /** + * Transmeta Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 2004. + * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code. + */ + cpuinfo_vendor_transmeta = 50, + /** + * Cyrix Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1996. + */ + cpuinfo_vendor_cyrix = 51, + /** + * Rise Technology. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1999. + */ + cpuinfo_vendor_rise = 52, + /** + * National Semiconductor. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998. + */ + cpuinfo_vendor_nsc = 53, + /** + * Silicon Integrated Systems. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001. + */ + cpuinfo_vendor_sis = 54, + /** + * NexGen. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1994. + * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations. + */ + cpuinfo_vendor_nexgen = 55, + /** + * United Microelectronics Corporation. Vendor of x86 processor microarchitectures. + * + * Ceased x86 in the early 1990s. The last processor design was released in 1991. + * Designed U5C and U5D processors. Both are 486 level. + */ + cpuinfo_vendor_umc = 56, + /** + * Digital Equipment Corporation. Vendor of ARM processor microarchitecture. + * + * Sold its ARM designs in 1997. The last processor design was released in 1997. + */ + cpuinfo_vendor_dec = 57, +}; + +/** + * Processor microarchitecture + * + * Processors with different microarchitectures often have different instruction performance characteristics, + * and may have dramatically different pipeline organization. + */ +enum cpuinfo_uarch { + /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from OS */ + cpuinfo_uarch_unknown = 0, + + /** Pentium and Pentium MMX microarchitecture. */ + cpuinfo_uarch_p5 = 0x00100100, + /** Intel Quark microarchitecture. */ + cpuinfo_uarch_quark = 0x00100101, + + /** Pentium Pro, Pentium II, and Pentium III. */ + cpuinfo_uarch_p6 = 0x00100200, + /** Pentium M. */ + cpuinfo_uarch_dothan = 0x00100201, + /** Intel Core microarchitecture. */ + cpuinfo_uarch_yonah = 0x00100202, + /** Intel Core 2 microarchitecture on 65 nm process. */ + cpuinfo_uarch_conroe = 0x00100203, + /** Intel Core 2 microarchitecture on 45 nm process. */ + cpuinfo_uarch_penryn = 0x00100204, + /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */ + cpuinfo_uarch_nehalem = 0x00100205, + /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */ + cpuinfo_uarch_sandy_bridge = 0x00100206, + /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */ + cpuinfo_uarch_ivy_bridge = 0x00100207, + /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */ + cpuinfo_uarch_haswell = 0x00100208, + /** Intel Broadwell microarchitecture. 
*/ + cpuinfo_uarch_broadwell = 0x00100209, + /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */ + cpuinfo_uarch_sky_lake = 0x0010020A, + /** DEPRECATED (Intel Kaby Lake microarchitecture). */ + cpuinfo_uarch_kaby_lake = 0x0010020A, + /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */ + cpuinfo_uarch_palm_cove = 0x0010020B, + /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */ + cpuinfo_uarch_sunny_cove = 0x0010020C, + + /** Pentium 4 with Willamette, Northwood, or Foster cores. */ + cpuinfo_uarch_willamette = 0x00100300, + /** Pentium 4 with Prescott and later cores. */ + cpuinfo_uarch_prescott = 0x00100301, + + /** Intel Atom on 45 nm process. */ + cpuinfo_uarch_bonnell = 0x00100400, + /** Intel Atom on 32 nm process. */ + cpuinfo_uarch_saltwell = 0x00100401, + /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */ + cpuinfo_uarch_silvermont = 0x00100402, + /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */ + cpuinfo_uarch_airmont = 0x00100403, + /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */ + cpuinfo_uarch_goldmont = 0x00100404, + /** Intel Goldmont Plus microarchitecture (Gemini Lake). */ + cpuinfo_uarch_goldmont_plus = 0x00100405, + + /** Intel Knights Ferry HPC boards. */ + cpuinfo_uarch_knights_ferry = 0x00100500, + /** Intel Knights Corner HPC boards (aka Xeon Phi). */ + cpuinfo_uarch_knights_corner = 0x00100501, + /** Intel Knights Landing microarchitecture (second-gen MIC). */ + cpuinfo_uarch_knights_landing = 0x00100502, + /** Intel Knights Hill microarchitecture (third-gen MIC). */ + cpuinfo_uarch_knights_hill = 0x00100503, + /** Intel Knights Mill Xeon Phi. */ + cpuinfo_uarch_knights_mill = 0x00100504, + + /** Intel/Marvell XScale series. */ + cpuinfo_uarch_xscale = 0x00100600, + + /** AMD K5. */ + cpuinfo_uarch_k5 = 0x00200100, + /** AMD K6 and alike. */ + cpuinfo_uarch_k6 = 0x00200101, + /** AMD Athlon and Duron. */ + cpuinfo_uarch_k7 = 0x00200102, + /** AMD Athlon 64, Opteron 64. */ + cpuinfo_uarch_k8 = 0x00200103, + /** AMD Family 10h (Barcelona, Istambul, Magny-Cours). */ + cpuinfo_uarch_k10 = 0x00200104, + /** + * AMD Bulldozer microarchitecture + * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs. + */ + cpuinfo_uarch_bulldozer = 0x00200105, + /** + * AMD Piledriver microarchitecture + * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs. + */ + cpuinfo_uarch_piledriver = 0x00200106, + /** AMD Steamroller microarchitecture (Kaveri APUs). */ + cpuinfo_uarch_steamroller = 0x00200107, + /** AMD Excavator microarchitecture (Carizzo APUs). */ + cpuinfo_uarch_excavator = 0x00200108, + /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen = 0x00200109, + /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen2 = 0x0020010A, + /** AMD Zen 3 microarchitecture. */ + cpuinfo_uarch_zen3 = 0x0020010B, + + /** NSC Geode and AMD Geode GX and LX. */ + cpuinfo_uarch_geode = 0x00200200, + /** AMD Bobcat mobile microarchitecture. */ + cpuinfo_uarch_bobcat = 0x00200201, + /** AMD Jaguar mobile microarchitecture. */ + cpuinfo_uarch_jaguar = 0x00200202, + /** AMD Puma mobile microarchitecture. */ + cpuinfo_uarch_puma = 0x00200203, + + /** ARM7 series. */ + cpuinfo_uarch_arm7 = 0x00300100, + /** ARM9 series. */ + cpuinfo_uarch_arm9 = 0x00300101, + /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. 
*/ + cpuinfo_uarch_arm11 = 0x00300102, + + /** ARM Cortex-A5. */ + cpuinfo_uarch_cortex_a5 = 0x00300205, + /** ARM Cortex-A7. */ + cpuinfo_uarch_cortex_a7 = 0x00300207, + /** ARM Cortex-A8. */ + cpuinfo_uarch_cortex_a8 = 0x00300208, + /** ARM Cortex-A9. */ + cpuinfo_uarch_cortex_a9 = 0x00300209, + /** ARM Cortex-A12. */ + cpuinfo_uarch_cortex_a12 = 0x00300212, + /** ARM Cortex-A15. */ + cpuinfo_uarch_cortex_a15 = 0x00300215, + /** ARM Cortex-A17. */ + cpuinfo_uarch_cortex_a17 = 0x00300217, + + /** ARM Cortex-A32. */ + cpuinfo_uarch_cortex_a32 = 0x00300332, + /** ARM Cortex-A35. */ + cpuinfo_uarch_cortex_a35 = 0x00300335, + /** ARM Cortex-A53. */ + cpuinfo_uarch_cortex_a53 = 0x00300353, + /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */ + cpuinfo_uarch_cortex_a55r0 = 0x00300354, + /** ARM Cortex-A55. */ + cpuinfo_uarch_cortex_a55 = 0x00300355, + /** ARM Cortex-A57. */ + cpuinfo_uarch_cortex_a57 = 0x00300357, + /** ARM Cortex-A65. */ + cpuinfo_uarch_cortex_a65 = 0x00300365, + /** ARM Cortex-A72. */ + cpuinfo_uarch_cortex_a72 = 0x00300372, + /** ARM Cortex-A73. */ + cpuinfo_uarch_cortex_a73 = 0x00300373, + /** ARM Cortex-A75. */ + cpuinfo_uarch_cortex_a75 = 0x00300375, + /** ARM Cortex-A76. */ + cpuinfo_uarch_cortex_a76 = 0x00300376, + /** ARM Cortex-A77. */ + cpuinfo_uarch_cortex_a77 = 0x00300377, + /** ARM Cortex-A78. */ + cpuinfo_uarch_cortex_a78 = 0x00300378, + + /** ARM Neoverse N1. */ + cpuinfo_uarch_neoverse_n1 = 0x00300400, + /** ARM Neoverse E1. */ + cpuinfo_uarch_neoverse_e1 = 0x00300401, + /** ARM Neoverse V1. */ + cpuinfo_uarch_neoverse_v1 = 0x00300402, + /** ARM Neoverse N2. */ + cpuinfo_uarch_neoverse_n2 = 0x00300403, + + /** ARM Cortex-X1. */ + cpuinfo_uarch_cortex_x1 = 0x00300501, + /** ARM Cortex-X2. */ + cpuinfo_uarch_cortex_x2 = 0x00300502, + + /** ARM Cortex-A510. */ + cpuinfo_uarch_cortex_a510 = 0x00300551, + /** ARM Cortex-A710. */ + cpuinfo_uarch_cortex_a710 = 0x00300571, + + /** Qualcomm Scorpion. */ + cpuinfo_uarch_scorpion = 0x00400100, + /** Qualcomm Krait. */ + cpuinfo_uarch_krait = 0x00400101, + /** Qualcomm Kryo. */ + cpuinfo_uarch_kryo = 0x00400102, + /** Qualcomm Falkor. */ + cpuinfo_uarch_falkor = 0x00400103, + /** Qualcomm Saphira. */ + cpuinfo_uarch_saphira = 0x00400104, + + /** Nvidia Denver. */ + cpuinfo_uarch_denver = 0x00500100, + /** Nvidia Denver 2. */ + cpuinfo_uarch_denver2 = 0x00500101, + /** Nvidia Carmel. */ + cpuinfo_uarch_carmel = 0x00500102, + + /** Samsung Exynos M1 (Exynos 8890 big cores). */ + cpuinfo_uarch_exynos_m1 = 0x00600100, + /** Samsung Exynos M2 (Exynos 8895 big cores). */ + cpuinfo_uarch_exynos_m2 = 0x00600101, + /** Samsung Exynos M3 (Exynos 9810 big cores). */ + cpuinfo_uarch_exynos_m3 = 0x00600102, + /** Samsung Exynos M4 (Exynos 9820 big cores). */ + cpuinfo_uarch_exynos_m4 = 0x00600103, + /** Samsung Exynos M5 (Exynos 9830 big cores). */ + cpuinfo_uarch_exynos_m5 = 0x00600104, + + /* Deprecated synonym for Cortex-A76 */ + cpuinfo_uarch_cortex_a76ae = 0x00300376, + /* Deprecated names for Exynos. */ + cpuinfo_uarch_mongoose_m1 = 0x00600100, + cpuinfo_uarch_mongoose_m2 = 0x00600101, + cpuinfo_uarch_meerkat_m3 = 0x00600102, + cpuinfo_uarch_meerkat_m4 = 0x00600103, + + /** Apple A6 and A6X processors. */ + cpuinfo_uarch_swift = 0x00700100, + /** Apple A7 processor. */ + cpuinfo_uarch_cyclone = 0x00700101, + /** Apple A8 and A8X processor. */ + cpuinfo_uarch_typhoon = 0x00700102, + /** Apple A9 and A9X processor. 
*/ + cpuinfo_uarch_twister = 0x00700103, + /** Apple A10 and A10X processor. */ + cpuinfo_uarch_hurricane = 0x00700104, + /** Apple A11 processor (big cores). */ + cpuinfo_uarch_monsoon = 0x00700105, + /** Apple A11 processor (little cores). */ + cpuinfo_uarch_mistral = 0x00700106, + /** Apple A12 processor (big cores). */ + cpuinfo_uarch_vortex = 0x00700107, + /** Apple A12 processor (little cores). */ + cpuinfo_uarch_tempest = 0x00700108, + /** Apple A13 processor (big cores). */ + cpuinfo_uarch_lightning = 0x00700109, + /** Apple A13 processor (little cores). */ + cpuinfo_uarch_thunder = 0x0070010A, + /** Apple A14 / M1 processor (big cores). */ + cpuinfo_uarch_firestorm = 0x0070010B, + /** Apple A14 / M1 processor (little cores). */ + cpuinfo_uarch_icestorm = 0x0070010C, + /** Apple A15 / M2 processor (big cores). */ + cpuinfo_uarch_avalanche = 0x0070010D, + /** Apple A15 / M2 processor (little cores). */ + cpuinfo_uarch_blizzard = 0x0070010E, + + /** Cavium ThunderX. */ + cpuinfo_uarch_thunderx = 0x00800100, + /** Cavium ThunderX2 (originally Broadcom Vulkan). */ + cpuinfo_uarch_thunderx2 = 0x00800200, + + /** Marvell PJ4. */ + cpuinfo_uarch_pj4 = 0x00900100, + + /** Broadcom Brahma B15. */ + cpuinfo_uarch_brahma_b15 = 0x00A00100, + /** Broadcom Brahma B53. */ + cpuinfo_uarch_brahma_b53 = 0x00A00101, + + /** Applied Micro X-Gene. */ + cpuinfo_uarch_xgene = 0x00B00100, + + /* Hygon Dhyana (a modification of AMD Zen for Chinese market). */ + cpuinfo_uarch_dhyana = 0x01000100, + + /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */ + cpuinfo_uarch_taishan_v110 = 0x00C00100, +}; + +struct cpuinfo_processor { + /** SMT (hyperthread) ID within a core */ + uint32_t smt_id; + /** Core containing this logical processor */ + const struct cpuinfo_core* core; + /** Cluster of cores containing this logical processor */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this logical processor */ + const struct cpuinfo_package* package; +#if defined(__linux__) + /** + * Linux-specific ID for the logical processor: + * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu/ + * - Bit in the cpu_set_t identifies this logical processor + */ + int linux_id; +#endif +#if defined(_WIN32) || defined(__CYGWIN__) + /** Windows-specific ID for the group containing the logical processor. */ + uint16_t windows_group_id; + /** + * Windows-specific ID of the logical processor within its group: + * - Bit in the KAFFINITY mask identifies this logical processor within its group. + */ + uint16_t windows_processor_id; +#endif +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** APIC ID (unique x86-specific ID of the logical processor) */ + uint32_t apic_id; +#endif + struct { + /** Level 1 instruction cache */ + const struct cpuinfo_cache* l1i; + /** Level 1 data cache */ + const struct cpuinfo_cache* l1d; + /** Level 2 unified or data cache */ + const struct cpuinfo_cache* l2; + /** Level 3 unified or data cache */ + const struct cpuinfo_cache* l3; + /** Level 4 unified or data cache */ + const struct cpuinfo_cache* l4; + } cache; +}; + +struct cpuinfo_core { + /** Index of the first logical processor on this core. */ + uint32_t processor_start; + /** Number of logical processors on this core */ + uint32_t processor_count; + /** Core ID within a package */ + uint32_t core_id; + /** Cluster containing this core */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this core. 
*/ + const struct cpuinfo_package* package; + /** Vendor of the CPU microarchitecture for this core */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture for this core */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for this core */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for this core */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the core, in Hz */ + uint64_t frequency; +}; + +struct cpuinfo_cluster { + /** Index of the first logical processor in the cluster */ + uint32_t processor_start; + /** Number of logical processors in the cluster */ + uint32_t processor_count; + /** Index of the first core in the cluster */ + uint32_t core_start; + /** Number of cores on the cluster */ + uint32_t core_count; + /** Cluster ID within a package */ + uint32_t cluster_id; + /** Physical package containing the cluster */ + const struct cpuinfo_package* package; + /** CPU microarchitecture vendor of the cores in the cluster */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture of the cores in the cluster */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register of the cores in the cluster */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) of the cores in the cluster */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */ + uint64_t frequency; +}; + +#define CPUINFO_PACKAGE_NAME_MAX 48 + +struct cpuinfo_package { + /** SoC or processor chip model name */ + char name[CPUINFO_PACKAGE_NAME_MAX]; + /** Index of the first logical processor on this physical package */ + uint32_t processor_start; + /** Number of logical processors on this physical package */ + uint32_t processor_count; + /** Index of the first core on this physical package */ + uint32_t core_start; + /** Number of cores on this physical package */ + uint32_t core_count; + /** Index of the first cluster of cores on this physical package */ + uint32_t cluster_start; + /** Number of clusters of cores on this physical package */ + uint32_t cluster_count; +}; + +struct cpuinfo_uarch_info { + /** Type of CPU microarchitecture */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for the microarchitecture */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for the microarchitecture */ + uint32_t midr; +#endif + /** Number of logical processors with the microarchitecture */ + uint32_t processor_count; + /** Number of cores with the microarchitecture */ + uint32_t core_count; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +bool CPUINFO_ABI cpuinfo_initialize(void); + +void CPUINFO_ABI cpuinfo_deinitialize(void); + +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. 
*/ + struct cpuinfo_x86_isa { + #if CPUINFO_ARCH_X86 + bool rdtsc; + #endif + bool rdtscp; + bool rdpid; + bool sysenter; + #if CPUINFO_ARCH_X86 + bool syscall; + #endif + bool msr; + bool clzero; + bool clflush; + bool clflushopt; + bool mwait; + bool mwaitx; + #if CPUINFO_ARCH_X86 + bool emmx; + #endif + bool fxsave; + bool xsave; + #if CPUINFO_ARCH_X86 + bool fpu; + bool mmx; + bool mmx_plus; + #endif + bool three_d_now; + bool three_d_now_plus; + #if CPUINFO_ARCH_X86 + bool three_d_now_geode; + #endif + bool prefetch; + bool prefetchw; + bool prefetchwt1; + #if CPUINFO_ARCH_X86 + bool daz; + bool sse; + bool sse2; + #endif + bool sse3; + bool ssse3; + bool sse4_1; + bool sse4_2; + bool sse4a; + bool misaligned_sse; + bool avx; + bool fma3; + bool fma4; + bool xop; + bool f16c; + bool avx2; + bool avx512f; + bool avx512pf; + bool avx512er; + bool avx512cd; + bool avx512dq; + bool avx512bw; + bool avx512vl; + bool avx512ifma; + bool avx512vbmi; + bool avx512vbmi2; + bool avx512bitalg; + bool avx512vpopcntdq; + bool avx512vnni; + bool avx512bf16; + bool avx512fp16; + bool avx512vp2intersect; + bool avx512_4vnniw; + bool avx512_4fmaps; + bool hle; + bool rtm; + bool xtest; + bool mpx; + #if CPUINFO_ARCH_X86 + bool cmov; + bool cmpxchg8b; + #endif + bool cmpxchg16b; + bool clwb; + bool movbe; + #if CPUINFO_ARCH_X86_64 + bool lahf_sahf; + #endif + bool fs_gs_base; + bool lzcnt; + bool popcnt; + bool tbm; + bool bmi; + bool bmi2; + bool adx; + bool aes; + bool vaes; + bool pclmulqdq; + bool vpclmulqdq; + bool gfni; + bool rdrand; + bool rdseed; + bool sha; + bool rng; + bool ace; + bool ace2; + bool phe; + bool pmm; + bool lwp; + }; + + extern struct cpuinfo_x86_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_x86_rdtsc(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.rdtsc; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdtscp(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdtscp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdpid(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdpid; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clzero(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clzero; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwait(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwait; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwaitx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwaitx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fxsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fxsave; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xsave; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fpu(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.fpu; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx; + #endif + #else + return 
false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx_plus(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx_plus; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_plus(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now_plus; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_geode(void) { + #if CPUINFO_ARCH_X86_64 + return false; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return false; + #else + return cpuinfo_isa.three_d_now_geode; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetch(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetch; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchwt1(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchwt1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_daz(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.daz; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse2(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse2; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_ssse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.ssse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_1(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_1; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_2(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_2; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4a(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sse4a; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_misaligned_sse(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.misaligned_sse; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx; + #else + return false; + #endif +} + 
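+/* A typical consumer-side dispatch pattern for the predicates above
+ * (illustrative sketch; example_gemm_fn and example_select_gemm_kernel are
+ * hypothetical names, not part of the cpuinfo API): initialize the library
+ * once, then branch on cpuinfo_has_x86_*() to pick a code path at runtime. */
+typedef void (*example_gemm_fn)(void);
+static inline example_gemm_fn example_select_gemm_kernel(
+	example_gemm_fn avx_kernel,
+	example_gemm_fn sse41_kernel,
+	example_gemm_fn scalar_kernel)
+{
+	/* Feature queries are only valid after successful initialization. */
+	if (!cpuinfo_initialize()) {
+		return scalar_kernel;
+	}
+	if (cpuinfo_has_x86_avx()) {
+		return avx_kernel; /* widest supported path first */
+	}
+	if (cpuinfo_has_x86_sse4_1()) {
+		return sse41_kernel;
+	}
+	return scalar_kernel; /* portable fallback */
+}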
+static inline bool cpuinfo_has_x86_fma3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fma4(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma4; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xop(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xop; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_f16c(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.f16c; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512f(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512f; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512pf(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512pf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512er(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512er; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512cd(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512cd; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512dq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512dq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vl(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vl; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512ifma(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512ifma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bitalg(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bitalg; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vpopcntdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vnni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vnni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bf16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512fp16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vp2intersect(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vp2intersect; + #else + return false; + #endif +} + 
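+/* AVX-512 support is split across many sub-features, so kernels are usually
+ * gated on a combination of the predicates above rather than on AVX512F
+ * alone. Illustrative sketch (example_has_avx512_skx_baseline is a
+ * hypothetical helper, not part of the cpuinfo API); F+CD+DQ+BW+VL is the
+ * combination commonly treated as the Skylake-X baseline. */
+static inline bool example_has_avx512_skx_baseline(void) {
+	if (!cpuinfo_initialize()) {
+		return false; /* cannot query features without initialization */
+	}
+	return cpuinfo_has_x86_avx512f() && cpuinfo_has_x86_avx512cd() &&
+		cpuinfo_has_x86_avx512dq() && cpuinfo_has_x86_avx512bw() &&
+		cpuinfo_has_x86_avx512vl();
+}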
+static inline bool cpuinfo_has_x86_avx512_4vnniw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4vnniw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512_4fmaps(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4fmaps; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_hle(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.hle; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rtm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rtm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xtest(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xtest; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mpx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mpx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmov(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmov; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg8b(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmpxchg8b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg16b(void) { + #if CPUINFO_ARCH_X86_64 + return cpuinfo_isa.cmpxchg16b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clwb(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clwb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_movbe(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.movbe; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lahf_sahf(void) { + #if CPUINFO_ARCH_X86 + return true; + #elif CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lahf_sahf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lzcnt(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lzcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_popcnt(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.popcnt; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.popcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_tbm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.tbm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_adx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.adx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_aes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vaes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vaes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_pclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.pclmulqdq; + #else + return false; + 
#endif +} + +static inline bool cpuinfo_has_x86_vpclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vpclmulqdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_gfni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.gfni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdrand(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdrand; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdseed(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdseed; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sha(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sha; + #else + return false; + #endif +} + +#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /* This structure is not a part of stable API. Use cpuinfo_has_arm_* functions instead. */ + struct cpuinfo_arm_isa { + #if CPUINFO_ARCH_ARM + bool thumb; + bool thumb2; + bool thumbee; + bool jazelle; + bool armv5e; + bool armv6; + bool armv6k; + bool armv7; + bool armv7mp; + bool armv8; + bool idiv; + + bool vfpv2; + bool vfpv3; + bool d32; + bool fp16; + bool fma; + + bool wmmx; + bool wmmx2; + bool neon; + #endif + #if CPUINFO_ARCH_ARM64 + bool atomics; + bool bf16; + bool sve; + bool sve2; + bool i8mm; + #endif + bool rdm; + bool fp16arith; + bool dot; + bool jscvt; + bool fcma; + bool fhm; + + bool aes; + bool sha1; + bool sha2; + bool pmull; + bool crc32; + }; + + extern struct cpuinfo_arm_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_arm_thumb(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_thumb2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v5e(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv5e; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6k(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6k; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7mp(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7mp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_idiv(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.idiv; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + 
#elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fp16_arith(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fma(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_atomics(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.atomics; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_rdm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.rdm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16_arith(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16arith; + #elif CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fhm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fhm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_dot(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.dot; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_jscvt(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.jscvt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fcma(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fcma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_i8mm(void) { + #if CPUINFO_ARCH_ARM64 + return 
cpuinfo_isa.i8mm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_aes(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha1(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha2(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_pmull(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.pmull; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_crc32(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.crc32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve && cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve2(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve2; + #else + return false; + #endif +} + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void); + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index); + +uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void); + +/** + * Returns upper bound on cache size. + */ +uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void); + +/** + * Identify the logical processor that executes the current thread. 
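The cpuinfo_has_x86_*/cpuinfo_has_arm_* checks and the cpuinfo_get_* accessors declared here form the stable query surface of this header. The following is a minimal, illustrative usage sketch rather than part of the patch; it assumes cpuinfo_initialize() and cpuinfo_deinitialize(), which cpuinfo.h declares outside this excerpt.

#include <stdio.h>
#include <cpuinfo.h>

int main(void) {
    /* All queries require a successful cpuinfo_initialize() first. */
    if (!cpuinfo_initialize()) {
        fprintf(stderr, "failed to initialize cpuinfo\n");
        return 1;
    }
    /* The ISA checks read a cached field on the matching architecture and
     * compile to a constant false everywhere else, as declared above. */
    printf("popcnt=%d aes=%d neon=%d neon_dot=%d\n",
           (int) cpuinfo_has_x86_popcnt(), (int) cpuinfo_has_x86_aes(),
           (int) cpuinfo_has_arm_neon(), (int) cpuinfo_has_arm_neon_dot());
    /* Topology and cache queries return counts and pointers owned by cpuinfo. */
    printf("processors=%u l1d_caches=%u max_cache_size=%u\n",
           cpuinfo_get_processors_count(), cpuinfo_get_l1d_caches_count(),
           cpuinfo_get_max_cache_size());
    cpuinfo_deinitialize();
    return 0;
}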
+ * + * There is no guarantee that the thread will stay on the same logical processor for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void); + +/** + * Identify the core that executes the current thread. + * + * There is no guarantee that the thread will stay on the same core for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns 0. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns the user-specified default value. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* CPUINFO_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl.h new file mode 100644 index 0000000000000000000000000000000000000000..bc74bf644f4b628018d7a9103ba63320abc466d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_H +#define DNNL_H + +#include "oneapi/dnnl/dnnl.h" + +#endif /* DNNL_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_config.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_config.h new file mode 100644 index 0000000000000000000000000000000000000000..48925e1e3ab49ae135c6e9c4c501aa2f5e030913 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_config.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_CONFIG_H +#define DNNL_CONFIG_H + +#include "oneapi/dnnl/dnnl_config.h" + +#endif /* DNNL_CONFIG_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_debug.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..5044971832bbbe56127920a527508b207a803eea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_debug.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_DEBUG_H +#define DNNL_DEBUG_H + +#include "oneapi/dnnl/dnnl_debug.h" + +#endif /* DNNL_DEBUG_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_ocl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_ocl.h new file mode 100644 index 0000000000000000000000000000000000000000..ad731150b28babe7bd5a911acd8de70c57e85254 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_ocl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/ + +#ifndef DNNL_OCL_H +#define DNNL_OCL_H + +#include "oneapi/dnnl/dnnl_ocl.h" + +#endif /* DNNL_OCL_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl.h new file mode 100644 index 0000000000000000000000000000000000000000..4501598c2f461021f0fa818e95fd1972ce2d3ace --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_SYCL_H +#define DNNL_SYCL_H + +#include "oneapi/dnnl/dnnl_sycl.h" + +#endif /* DNNL_SYCL_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..a4a854a4cf138103f4c53030083e119cc0732cf1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_SYCL_TYPES_H +#define DNNL_SYCL_TYPES_H + +#include "oneapi/dnnl/dnnl_sycl_types.h" + +#endif /* DNNL_SYCL_TYPES_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..e27e584a65ed16740d4fde93da3a1a049dd111aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_THREADPOOL_H +#define DNNL_THREADPOOL_H + +#include "oneapi/dnnl/dnnl_threadpool.h" + +#endif /* DNNL_THREADPOOL_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_types.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..6f4261b712dc37ec2416ba60c0c68bb30f6995e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_types.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_TYPES_H +#define DNNL_TYPES_H + +#include "oneapi/dnnl/dnnl_types.h" + +#endif /* DNNL_TYPES_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_version.h b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_version.h new file mode 100644 index 0000000000000000000000000000000000000000..32a3d5cf839b1d593f069520febfd60b323730e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_version.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/
+
+#ifndef DNNL_VERSION_H
+#define DNNL_VERSION_H
+
+#include "oneapi/dnnl/dnnl_version.h"
+
+#endif /* DNNL_VERSION_H */
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/fp16.h b/env-llmeval/lib/python3.10/site-packages/torch/include/fp16.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d7366e997dadef17922225bcbb489288f6f9cdc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/fp16.h
@@ -0,0 +1,11 @@
+#pragma once
+#ifndef FP16_H
+#define FP16_H
+
+#include <fp16/fp16.h>
+
+#if defined(PSIMD_H)
+#include <fp16/psimd.h>
+#endif
+
+#endif /* FP16_H */
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/fxdiv.h b/env-llmeval/lib/python3.10/site-packages/torch/include/fxdiv.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c35038d97c55c524bb97caba2e3560cab9da504
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/fxdiv.h
@@ -0,0 +1,425 @@
+#pragma once
+#ifndef FXDIV_H
+#define FXDIV_H
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+ #include <cstddef>
+ #include <cstdint>
+ #include <climits>
+#elif !defined(__OPENCL_VERSION__)
+ #include <stddef.h>
+ #include <stdint.h>
+ #include <limits.h>
+#endif
+
+#if defined(_MSC_VER)
+ #include <intrin.h>
+ #if defined(_M_IX86) || defined(_M_X64)
+ #include <immintrin.h>
+ #endif
+#endif
+
+#ifndef FXDIV_USE_INLINE_ASSEMBLY
+ #define FXDIV_USE_INLINE_ASSEMBLY 0
+#endif
+
+static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) {
+#if defined(_MSC_VER) && defined(_M_IX86)
+ return (uint64_t) __emulu((unsigned int) a, (unsigned int) b);
+#else
+ return (uint64_t) a * (uint64_t) b;
+#endif
+}
+
+static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) {
+#if defined(__OPENCL_VERSION__)
+ return mul_hi(a, b);
+#elif defined(__CUDA_ARCH__)
+ return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b);
+#elif defined(_MSC_VER) && defined(_M_IX86)
+ return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32);
+#elif defined(_MSC_VER) && defined(_M_ARM)
+ return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b);
+#else
+ return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32);
+#endif
+}
+
+static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) {
+#if defined(__OPENCL_VERSION__)
+ return mul_hi(a, b);
+#elif defined(__CUDA_ARCH__)
+ return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b);
+#elif defined(_MSC_VER) && defined(_M_X64)
+ return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b);
+#elif defined(__GNUC__) && defined(__SIZEOF_INT128__)
+ return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64);
+#else
+ const uint32_t a_lo = (uint32_t) a;
+ const uint32_t a_hi = (uint32_t) (a >> 32);
+ const uint32_t b_lo = (uint32_t) b;
+ const uint32_t b_hi = (uint32_t) (b >> 32);
+
+ const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) +
+ (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo);
+ return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) +
+ ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32);
+#endif
+}
+
+static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) {
+#if SIZE_MAX == UINT32_MAX
+ return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b);
+#elif SIZE_MAX == UINT64_MAX
+ return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b);
+#else
+ #error Unsupported platform
+#endif
+}
+
+struct fxdiv_divisor_uint32_t {
+ uint32_t value;
+ uint32_t m;
+ uint8_t s1;
+ uint8_t s2;
+};
+
+struct
fxdiv_result_uint32_t { + uint32_t quotient; + uint32_t remainder; +}; + +struct fxdiv_divisor_uint64_t { + uint64_t value; + uint64_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint64_t { + uint64_t quotient; + uint64_t remainder; +}; + +struct fxdiv_divisor_size_t { + size_t value; + size_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_size_t { + size_t quotient; + size_t remainder; +}; + +static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) { + struct fxdiv_divisor_uint32_t result = { d }; + if (d == 1) { + result.m = UINT32_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t l_minus_1 = 31 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t l_minus_1 = 31 - __clz((int) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse(&l_minus_1, (unsigned long) (d - 1)); + #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t l_minus_1; + __asm__("BSRL %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1); + #else + /* Based on Algorithm 2 from Hacker's delight */ + + uint32_t l_minus_1 = 0; + uint32_t x = d - 1; + uint32_t y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + #endif + uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */ + #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t q; + __asm__("DIVL %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (0) + : "cc"); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64)) + unsigned int remainder; + const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder); + #else + const uint32_t q = ((uint64_t) u_hi << 32) / d; + #endif + + result.m = q + UINT32_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) { + struct fxdiv_divisor_uint64_t result = { d }; + if (d == 1) { + result.m = UINT64_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t nlz_d = clz(d); + const uint32_t l_minus_1 = 63 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t nlz_d = __clzll((long long) d); + const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1)); + unsigned long bsr_d; + _BitScanReverse64(&bsr_d, (unsigned __int64) d); + const uint32_t nlz_d = bsr_d ^ 0x3F; + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM)) + const uint64_t d_minus_1 = d - 1; + const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0; + unsigned long l_minus_1; + if ((uint32_t) (d_minus_1 >> 32) == 0) { + _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1); + } 
else { + _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32)); + l_minus_1 += 32; + } + const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2; + #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t l_minus_1; + __asm__("BSRQ %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1); + const uint32_t nlz_d = __builtin_clzll(d); + #else + /* Based on Algorithm 2 from Hacker's delight */ + const uint64_t d_minus_1 = d - 1; + const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0; + uint32_t l_minus_1 = 0; + uint32_t x = (uint32_t) d_minus_1; + uint32_t y = d_minus_1 >> 32; + if (y != 0) { + l_minus_1 += 32; + x = y; + } + y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2; + #endif + uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */ + #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t q; + __asm__("DIVQ %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (UINT64_C(0)) + : "cc"); + #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__) + /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */ + const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d)); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64) + unsigned __int64 remainder; + const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder); + #else + /* Implementation based on code from Hacker's delight */ + + /* Normalize divisor and shift divident left */ + d <<= nlz_d; + u_hi <<= nlz_d; + /* Break divisor up into two 32-bit digits */ + const uint64_t d_hi = (uint32_t) (d >> 32); + const uint32_t d_lo = (uint32_t) d; + + /* Compute the first quotient digit, q1 */ + uint64_t q1 = u_hi / d_hi; + uint64_t r1 = u_hi - q1 * d_hi; + + while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) { + q1 -= 1; + r1 += d_hi; + if ((r1 >> 32) != 0) { + break; + } + } + + /* Multiply and subtract. 
*/ + u_hi = (u_hi << 32) - q1 * d; + + /* Compute the second quotient digit, q0 */ + uint64_t q0 = u_hi / d_hi; + uint64_t r0 = u_hi - q0 * d_hi; + + while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) { + q0 -= 1; + r0 += d_hi; + if ((r0 >> 32) != 0) { + break; + } + } + const uint64_t q = (q1 << 32) | (uint32_t) q0; + #endif + result.m = q + UINT64_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d); +#else + #error Unsupported platform +#endif + struct fxdiv_divisor_size_t size_result = { + (size_t) uint_result.value, + (size_t) uint_result.m, + uint_result.s1, + uint_result.s2 + }; + return size_result; +} + +static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint32_divisor = { + (uint32_t) divisor.value, + (uint32_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint64_divisor = { + (uint64_t) divisor.value, + (uint64_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor); +#else + #error Unsupported platform +#endif +} + +static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity); + return quotient * granularity.value; +} + +static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity); + return quotient * granularity.value; +} + +static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) { + const size_t quotient = fxdiv_quotient_size_t(n, granularity); + return quotient * granularity.value; +} + +static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + const 
uint32_t remainder = n - quotient * divisor.value;
+ struct fxdiv_result_uint32_t result = { quotient, remainder };
+ return result;
+}
+
+static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
+ const uint64_t remainder = n - quotient * divisor.value;
+ struct fxdiv_result_uint64_t result = { quotient, remainder };
+ return result;
+}
+
+static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
+ const size_t remainder = n - quotient * divisor.value;
+ struct fxdiv_result_size_t result = { quotient, remainder };
+ return result;
+}
+
+#endif /* FXDIV_H */
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/libshm.h b/env-llmeval/lib/python3.10/site-packages/torch/include/libshm.h
new file mode 100644
index 0000000000000000000000000000000000000000..28024aa2338d1f46ce280abeb92a633f89be1385
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/libshm.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include <ATen/MapAllocator.h>
+
+#ifdef __cplusplus
+
+void libshm_init(const char* manager_exec_path);
+
+// Superclass to run a constructor before at::RefcountedMapAllocator
+class THManagedMapAllocatorInit {
+ protected:
+ THManagedMapAllocatorInit(const char* manager_handle, const char* filename);
+ std::string manager_handle_;
+};
+
+// Like a at::RefcountedMapAllocator, but it also makes use of an external
+// shared memory manager process to ensure that shared memory regions actually
+// get freed in the end (even if processes lose the memory).
+class THManagedMapAllocator : private THManagedMapAllocatorInit,
+ public at::RefcountedMapAllocator {
+ public:
+ THManagedMapAllocator(
+ const char* manager_handle,
+ const char* filename,
+ int flags,
+ size_t size);
+
+ void close() override;
+
+ ~THManagedMapAllocator() override {
+ close();
+ }
+
+ static at::DataPtr makeDataPtr(
+ const char* manager_handle,
+ const char* filename,
+ int flags,
+ size_t size);
+ static THManagedMapAllocator* fromDataPtr(const at::DataPtr&);
+
+ const char* manager_handle() const {
+ return manager_handle_.c_str();
+ }
+};
+
+#endif
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/nnpack.h b/env-llmeval/lib/python3.10/site-packages/torch/include/nnpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..97b5ff390076e9ab7ae91e67bfc0d78736aaeffd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/nnpack.h
@@ -0,0 +1,659 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <pthreadpool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Status code for any NNPACK function call.
+ */
+enum nnp_status {
+ /** The call succeeded, and all output arguments now contain valid data. */
+ nnp_status_success = 0,
+ /** NNPACK function was called with batch_size == 0. */
+ nnp_status_invalid_batch_size = 2,
+ /** NNPACK function was called with channels == 0. */
+ nnp_status_invalid_channels = 3,
+ /** NNPACK function was called with input_channels == 0. */
+ nnp_status_invalid_input_channels = 4,
+ /** NNPACK function was called with output_channels == 0.
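Before the remaining NNPACK status codes, a short usage sketch for the FXdiv header completed above. FXdiv trades one up-front fxdiv_init_*() call for cheap repeated division by a run-time constant: the quotient is computed as (t + ((n - t) >> s1)) >> s2 with t = mulhi(n, m), exactly as in fxdiv_quotient_uint32_t() above. The sketch is illustrative, not part of the patch, and assumes fxdiv.h is on the include path.

#include <stdint.h>
#include <stdio.h>
#include <fxdiv.h>

int main(void) {
    /* Precompute the magic multiplier and shifts for the divisor 7 once. */
    const struct fxdiv_divisor_uint32_t d7 = fxdiv_init_uint32_t(7);
    for (uint32_t n = 0; n < 1000; n++) {
        const struct fxdiv_result_uint32_t qr = fxdiv_divide_uint32_t(n, d7);
        /* Must agree with the plain division operators. */
        if (qr.quotient != n / 7 || qr.remainder != n % 7) {
            printf("mismatch at n=%u\n", n);
            return 1;
        }
    }
    /* Round 100 down to the nearest multiple of 7 (prints 98). */
    printf("round_down(100, 7) = %u\n", fxdiv_round_down_uint32_t(100, d7));
    return 0;
}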
*/ + nnp_status_invalid_output_channels = 5, + /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */ + nnp_status_invalid_input_size = 10, + /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */ + nnp_status_invalid_input_stride = 11, + /** NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.: + * + * - input_padding.left >= kernel_size.width (>= pooling_size.width) + * - input_padding.right >= kernel_size.width (>= pooling_size.width) + * - input_padding.top >= kernel_size.height (>= pooling_size.height) + * - input_padding.bottom >= kernel_size.height (>= pooling_size.height) + */ + nnp_status_invalid_input_padding = 12, + /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */ + nnp_status_invalid_kernel_size = 13, + /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */ + nnp_status_invalid_pooling_size = 14, + /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */ + nnp_status_invalid_pooling_stride = 15, + /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */ + nnp_status_invalid_algorithm = 16, + /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */ + nnp_status_invalid_transform_strategy = 17, + /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */ + nnp_status_invalid_output_subsampling = 13, + /** NNPACK function was called with activation not in nnp_activation enum */ + nnp_status_invalid_activation = 14, + /** NNPACK function was called with invalid activation parameters */ + nnp_status_invalid_activation_parameters = 15, + + /** NNPACK does not support the particular input size for the function */ + nnp_status_unsupported_input_size = 20, + /** NNPACK does not support the particular input stride for the function */ + nnp_status_unsupported_input_stride = 21, + /** NNPACK does not support the particular input padding for the function */ + nnp_status_unsupported_input_padding = 22, + /** NNPACK does not support the particular kernel size for the function */ + nnp_status_unsupported_kernel_size = 23, + /** NNPACK does not support the particular pooling size for the function */ + nnp_status_unsupported_pooling_size = 24, + /** NNPACK does not support the particular pooling stride for the function */ + nnp_status_unsupported_pooling_stride = 25, + /** NNPACK does not support the particular convolution algorithm for the function */ + nnp_status_unsupported_algorithm = 26, + /** NNPACK does not support the particular convolution transform strategy for the algorithm */ + nnp_status_unsupported_transform_strategy = 27, + /** NNPACK does not support the particular activation function for the function */ + nnp_status_unsupported_activation = 28, + /** NNPACK does not support the particular activation function parameters for the function */ + nnp_status_unsupported_activation_parameters = 29, + + /** NNPACK function was called before the library was initialized */ + nnp_status_uninitialized = 50, + /** NNPACK does not implement this function for the host CPU */ + nnp_status_unsupported_hardware = 51, + /** NNPACK failed to allocate memory for temporary buffers */ + nnp_status_out_of_memory = 52, + /** Scratch space buffer is too small */ + nnp_status_insufficient_buffer = 53, + /** Scratch space buffer 
is not properly aligned */ + nnp_status_misaligned_buffer = 54 +}; + +/** + * @brief Activation applied applied after a convolutional or fully-connected layer. + */ +enum nnp_activation { + /** Identity activation f(x) := x, i.e. no transformation */ + nnp_activation_identity = 0, + /** ReLU activation f(x) := max(0, x) */ + nnp_activation_relu = 1, +}; + +/** + * @brief Algorithm for computing convolutional layers. + */ +enum nnp_convolution_algorithm { + /** Let NNPACK choose the algorithm depending on layer parameters */ + nnp_convolution_algorithm_auto = 0, + /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */ + nnp_convolution_algorithm_ft8x8 = 1, + /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */ + nnp_convolution_algorithm_ft16x16 = 2, + /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */ + nnp_convolution_algorithm_wt8x8 = 3, + /** Direct convolution via implicit GEMM. */ + nnp_convolution_algorithm_implicit_gemm = 4, + /** Direct convolution implementation. */ + nnp_convolution_algorithm_direct = 5, + /** + * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16. + * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP), + * on non-supported processors falls back to nnp_convolution_algorithm_wt8x8. + */ + nnp_convolution_algorithm_wt8x8_fp16 = 6, +}; + +enum nnp_convolution_transform_strategy { + nnp_convolution_transform_strategy_compute = 1, + nnp_convolution_transform_strategy_precompute = 2, + nnp_convolution_transform_strategy_reuse = 3 +}; + +/* For backward compatibility */ +#define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute +#define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute + +/** + * @brief Size of images, kernels, and pooling filters in NNPACK. + */ +struct nnp_size { + /** Width (horizontal size) of an image, kernel, or pooling filter. */ + size_t width; + /** Height (vertical size) of an image, kernel, or pooling filter. */ + size_t height; +}; + +/** + * @brief Padding of images in NNPACK. + */ +struct nnp_padding { + /** Padding above the image data */ + size_t top; + /** Padding on the right of image data */ + size_t right; + /** Padding below the image data */ + size_t bottom; + /** Padding on the left of image data */ + size_t left; +}; + +/** + * @brief Profiling information about time spent in different phases of a function call. + */ +struct nnp_profile { + /** Time spent inside the function call, in seconds. */ + double total; + /** Time spend on transformation of the input or input gradient tensor, in seconds. */ + double input_transform; + /** Time spend on transformation of the kernel or kernel gradient tensor, in seconds. */ + double kernel_transform; + /** Time spend on transformation of the output or output gradient tensor, in seconds. */ + double output_transform; + /** Time spend on multiplication-accumulation of transformed coefficients, in seconds. */ + double block_multiplication; +}; + +enum nnp_status nnp_initialize(void); + +enum nnp_status nnp_deinitialize(void); + +/** + * @brief Computes output of a 2D convolutional layer from input and kernel tensors. + * @details This function targets training of convolutional neural networks and performs forward propagation. 
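A brief sketch of the initialization contract declared just above: nnp_initialize() must succeed before any other nnp_* call, and nnp_status_unsupported_hardware is the code callers usually treat as "use a non-NNPACK fallback". The helper below is illustrative only, not part of the patch, and assumes nnpack.h is on the include path.

#include <stdio.h>
#include <nnpack.h>

/* Returns 1 if NNPACK kernels are available on this CPU, 0 otherwise. */
static int ensure_nnpack(void) {
    const enum nnp_status status = nnp_initialize();
    if (status == nnp_status_unsupported_hardware) {
        fprintf(stderr, "NNPACK: no kernels for this CPU, using fallback path\n");
        return 0;
    }
    if (status != nnp_status_success) {
        fprintf(stderr, "NNPACK: initialization failed, status %d\n", (int) status);
        return 0;
    }
    /* Pair with nnp_deinitialize() at shutdown. */
    return 1;
}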
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_convolution_inference for optimal performance. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images. + * @param input_size Size of input images, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. + * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ + +enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. 
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients). + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* grad_output, + const float* kernel, + float* grad_input, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. 
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[out] grad_kernel A 4D tensor + * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* grad_output, + float* grad_kernel, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param transform_strategy A strategy that guides computation of kernel transforms coefficients. + * Possible values are: + * + * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed + * coefficients. + * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed + * coefficients. + * + * @param input_channels The number of channels (AKA features, dimensions) in the input image. + * @param output_channels The number of channels (AKA features, dimensions) in the output image. + * @param input_size Size of input image, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input image. + * @param kernel_size Kernel size. + * @param output_subsampling Subsample region for output, also known as convolution stride. + * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. 
+ * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes. + * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size + * of required workspace memory at the workspace_size location, and exit without + * computations. + * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory + * before and deallocate after this computation, potentially at significant runtime cost. + * @param[in,out] workspace_size Pointer to the size of workspace buffer. + * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to + * the location specified by this pointer. + * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of + * the buffer, in bytes. + * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK + * would allocate memory before and deallocate after this computation, potentially at + * significant runtime cost. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a fully connected layer from input and kernel matrices. + * @details This function targets training of convolutional neural networks and performs forward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_fully_connected_inference for optimal performance. + * @param batch_size The number of vectors on the input and output of the fully connected layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input matrix. + * @param output_channels The number of channels (AKA features, dimensions) in the output matrix. + * @param[in] input A 2D matrix input[batch_size][input_channels]. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels]. + * @param[out] output A 2D matrix output[batch_size][output_channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. 
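+ *
+ * A minimal usage sketch (editorial illustration, not part of the original header; the
+ * batch/channel sizes, the include paths and the helper name run_fc are assumptions, and
+ * nnp_initialize(), nnp_status_success, pthreadpool_create() and pthreadpool_destroy() are
+ * assumed to be declared earlier in this header and in pthreadpool.h):
+ *
+ *     #include <nnpack.h>
+ *     #include <pthreadpool.h>
+ *
+ *     // output[64][1000] = input[64][4096] * kernel[1000][4096]^T
+ *     static enum nnp_status run_fc(const float* input, const float* kernel, float* output) {
+ *         enum nnp_status status = nnp_initialize();
+ *         if (status != nnp_status_success)
+ *             return status;
+ *         pthreadpool_t threadpool = pthreadpool_create(0);  // 0 = one thread per logical processor
+ *         status = nnp_fully_connected_output(
+ *             64,    // batch_size
+ *             4096,  // input_channels
+ *             1000,  // output_channels
+ *             input, kernel, output,
+ *             threadpool,
+ *             NULL); // no profiling
+ *         pthreadpool_destroy(threadpool);
+ *         return status;
+ *     }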
+ */
+enum nnp_status nnp_fully_connected_output(
+ size_t batch_size,
+ size_t input_channels,
+ size_t output_channels,
+ const float input[],
+ const float kernel[],
+ float output[],
+ pthreadpool_t threadpool,
+ struct nnp_profile* profile);
+
+/**
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements.
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_fully_connected_inference(
+ size_t input_channels,
+ size_t output_channels,
+ const float* input,
+ const float* kernel,
+ float* output,
+ pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements.
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_fully_connected_inference_f16f32(
+ size_t input_channels,
+ size_t output_channels,
+ const float* input,
+ const void* kernel,
+ float* output,
+ pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a max-pooling layer for an input tensor.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ * propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of images on the input and output of the max-pooling layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output images.
+ * @param input_size Size of input images, excluding implicit zero-padding.
+ * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but
+ * affect the output size.
+ * @param pooling_size Size of the pooling filter. Only 2x2 filters are currently supported.
+ * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported.
+ * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width].
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where
+ * output_size.height = ceil(
+ * (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) /
+ * pooling_stride.height) + 1
+ * output_size.width = ceil(
+ * (input_padding.left + input_size.width + input_padding.right - pooling_size.width) /
+ * pooling_stride.width) + 1
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_max_pooling_output(
+ size_t batch_size,
+ size_t channels,
+ struct nnp_size input_size,
+ struct nnp_padding input_padding,
+ struct nnp_size pooling_size,
+ struct nnp_size pooling_stride,
+ const float input[],
+ float output[],
+ pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a softmax layer for an input matrix.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ * propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the softmax layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output vectors.
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] output A 2D matrix output[batch_size][channels].
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_softmax_output(
+ size_t batch_size,
+ size_t channels,
+ const float input[],
+ float output[],
+ pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ * propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] output A 2D matrix output[batch_size][channels].
+ * @param negative_slope The slope applied to negative inputs (0.0f gives the standard ReLU).
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_relu_output(
+ size_t batch_size,
+ size_t channels,
+ const float input[],
+ float output[],
+ float negative_slope,
+ pthreadpool_t threadpool);
+
+/**
+ * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices.
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
+ * It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
+ * @param[in] grad_output A 2D matrix grad_output[batch_size][channels] with gradients of the layer outputs.
+ * @param[in] input A 2D matrix input[batch_size][channels] with the inputs of the forward pass.
+ * @param[out] grad_input A 2D matrix grad_input[batch_size][channels] with gradients of the layer inputs.
+ * @param negative_slope The slope applied to negative inputs (0.0f gives the standard ReLU).
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
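+ *
+ * A minimal usage sketch (editorial illustration, not part of the original header; the
+ * matrix shape and the helper name backprop_relu are assumptions):
+ *
+ *     // Backward pass of a standard ReLU (negative_slope == 0.0f) over a
+ *     // [256 x 4096] activation matrix, run on the calling thread (threadpool == NULL).
+ *     static enum nnp_status backprop_relu(const float* grad_output,
+ *                                          const float* input,
+ *                                          float* grad_input) {
+ *         return nnp_relu_input_gradient(
+ *             256,          // batch_size
+ *             4096,         // channels
+ *             grad_output,  // dL/d(output)
+ *             input,        // input of the forward pass
+ *             grad_input,   // dL/d(input), written by NNPACK
+ *             0.0f,         // negative_slope
+ *             NULL);        // threadpool
+ *     }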
+ */ +enum nnp_status nnp_relu_input_gradient( + size_t batch_size, + size_t channels, + const float grad_output[], + const float input[], + float grad_input[], + float negative_slope, + pthreadpool_t threadpool); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#ifdef __cplusplus +// Backward compatible implementations for nnp_convolution_*, if we are in C++ +// mode. +inline enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_output( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, kernel, bias, output, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float grad_output[], + const float kernel[], + float grad_input[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_input_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + grad_output, kernel, grad_input, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float grad_output[], + float grad_kernel[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_kernel_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, grad_output, grad_kernel, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) { + return nnp_convolution_inference( + algorithm, transform_strategy, + input_channels, output_channels, + input_size, input_padding, kernel_size, output_subsampling, + input, kernel, bias, output, NULL, NULL, + nnp_activation_identity, NULL, + threadpool, profile); +} + +#endif // __cplusplus diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/psimd.h b/env-llmeval/lib/python3.10/site-packages/torch/include/psimd.h new file mode 100644 index 0000000000000000000000000000000000000000..b7cb65d799c98931a73b3184511b1bd8c2b30ec0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/psimd.h @@ -0,0 +1,1384 @@ +#pragma once +#ifndef PSIMD_H +#define PSIMD_H + +#if 
defined(__CUDA_ARCH__) + /* CUDA compiler */ + #define PSIMD_INTRINSIC __forceinline__ __device__ +#elif defined(__OPENCL_VERSION__) + /* OpenCL compiler */ + #define PSIMD_INTRINSIC inline static +#elif defined(__INTEL_COMPILER) + /* Intel compiler, even on Windows */ + #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__)) +#elif defined(__GNUC__) + /* GCC-compatible compiler (gcc/clang/icc) */ + #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__)) +#elif defined(_MSC_VER) + /* MSVC-compatible compiler (cl/icl/clang-cl) */ + #define PSIMD_INTRINSIC __forceinline static +#elif defined(__cplusplus) + /* Generic C++ compiler */ + #define PSIMD_INTRINSIC inline static +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + /* Generic C99 compiler */ + #define PSIMD_INTRINSIC inline static +#else + /* Generic C compiler */ + #define PSIMD_INTRINSIC static +#endif + +#if defined(__GNUC__) || defined(__clang__) + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + #include + #endif + + #if defined(__SSE2__) + #include + #endif + + #if defined(__SSE3__) + #include + #endif + + #if defined(__SSSE3__) + #include + #endif + + #if defined(__SSE4_1__) + #include + #endif + + #if defined(__SSE4_2__) + #include + #endif + + #if defined(__AVX__) + #include + #endif +#elif defined(_MSC_VER) + #include +#endif + +#if defined(__cplusplus) + #define PSIMD_CXX_SYNTAX +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) + #define PSIMD_C11_SYNTAX +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + #define PSIMD_C99_SYNTAX +#else + #define PSIMD_C89_SYNTAX +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define PSIMD_HAVE_F64 0 + #define PSIMD_HAVE_F32 1 + #define PSIMD_HAVE_U8 1 + #define PSIMD_HAVE_S8 1 + #define PSIMD_HAVE_U16 1 + #define PSIMD_HAVE_S16 1 + #define PSIMD_HAVE_U32 1 + #define PSIMD_HAVE_S32 1 + #define PSIMD_HAVE_U64 0 + #define PSIMD_HAVE_S64 0 + + typedef int8_t psimd_s8 __attribute__((vector_size(16), aligned(1))); + typedef uint8_t psimd_u8 __attribute__((vector_size(16), aligned(1))); + typedef int16_t psimd_s16 __attribute__((vector_size(16), aligned(2))); + typedef uint16_t psimd_u16 __attribute__((vector_size(16), aligned(2))); + typedef int32_t psimd_s32 __attribute__((vector_size(16), aligned(4))); + typedef uint32_t psimd_u32 __attribute__((vector_size(16), aligned(4))); + typedef float psimd_f32 __attribute__((vector_size(16), aligned(4))); + + typedef struct { + psimd_s8 lo; + psimd_s8 hi; + } psimd_s8x2; + + typedef struct { + psimd_u8 lo; + psimd_u8 hi; + } psimd_u8x2; + + typedef struct { + psimd_s16 lo; + psimd_s16 hi; + } psimd_s16x2; + + typedef struct { + psimd_u16 lo; + psimd_u16 hi; + } psimd_u16x2; + + typedef struct { + psimd_s32 lo; + psimd_s32 hi; + } psimd_s32x2; + + typedef struct { + psimd_u32 lo; + psimd_u32 hi; + } psimd_u32x2; + + typedef struct { + psimd_f32 lo; + psimd_f32 hi; + } psimd_f32x2; + + /* Bit casts */ + PSIMD_INTRINSIC psimd_u32x2 psimd_cast_s32x2_u32x2(psimd_s32x2 v) { + return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi }; + } + + PSIMD_INTRINSIC psimd_f32x2 psimd_cast_s32x2_f32x2(psimd_s32x2 v) { + return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi }; + } + + PSIMD_INTRINSIC psimd_s32x2 psimd_cast_u32x2_s32x2(psimd_u32x2 v) { + return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = 
(psimd_s32) v.hi }; + } + + PSIMD_INTRINSIC psimd_f32x2 psimd_cast_u32x2_f32x2(psimd_u32x2 v) { + return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi }; + } + + PSIMD_INTRINSIC psimd_s32x2 psimd_cast_f32x2_s32x2(psimd_f32x2 v) { + return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi }; + } + + PSIMD_INTRINSIC psimd_u32x2 psimd_cast_f32x2_u32x2(psimd_f32x2 v) { + return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi }; + } + + /* Swap */ + PSIMD_INTRINSIC void psimd_swap_s8(psimd_s8 a[1], psimd_s8 b[1]) { + const psimd_s8 new_a = *b; + const psimd_s8 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u8(psimd_u8 a[1], psimd_u8 b[1]) { + const psimd_u8 new_a = *b; + const psimd_u8 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_s16(psimd_s16 a[1], psimd_s16 b[1]) { + const psimd_s16 new_a = *b; + const psimd_s16 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u16(psimd_u16 a[1], psimd_u16 b[1]) { + const psimd_u16 new_a = *b; + const psimd_u16 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_s32(psimd_s32 a[1], psimd_s32 b[1]) { + const psimd_s32 new_a = *b; + const psimd_s32 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u32(psimd_u32 a[1], psimd_u32 b[1]) { + const psimd_u32 new_a = *b; + const psimd_u32 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_f32(psimd_f32 a[1], psimd_f32 b[1]) { + const psimd_f32 new_a = *b; + const psimd_f32 new_b = *a; + *a = new_a; + *b = new_b; + } + + /* Zero-initialization */ + PSIMD_INTRINSIC psimd_s8 psimd_zero_s8(void) { + return (psimd_s8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u8 psimd_zero_u8(void) { + return (psimd_u8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_s16 psimd_zero_s16(void) { + return (psimd_s16) { 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u16 psimd_zero_u16(void) { + return (psimd_u16) { 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_zero_s32(void) { + return (psimd_s32) { 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_zero_u32(void) { + return (psimd_u32) { 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_zero_f32(void) { + return (psimd_f32) { 0.0f, 0.0f, 0.0f, 0.0f }; + } + + /* Initialization to the same constant */ + PSIMD_INTRINSIC psimd_s8 psimd_splat_s8(int8_t c) { + return (psimd_s8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u8 psimd_splat_u8(uint8_t c) { + return (psimd_u8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_s16 psimd_splat_s16(int16_t c) { + return (psimd_s16) { c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u16 psimd_splat_u16(uint16_t c) { + return (psimd_u16) { c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_splat_s32(int32_t c) { + return (psimd_s32) { c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_splat_u32(uint32_t c) { + return (psimd_u32) { c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat_f32(float c) { + return (psimd_f32) { c, c, c, c }; + } + + /* Load vector */ + PSIMD_INTRINSIC psimd_s8 psimd_load_s8(const void* address) { + return *((const psimd_s8*) address); + } + + PSIMD_INTRINSIC psimd_u8 psimd_load_u8(const void* address) { + return *((const psimd_u8*) address); + } + + PSIMD_INTRINSIC psimd_s16 psimd_load_s16(const 
void* address) { + return *((const psimd_s16*) address); + } + + PSIMD_INTRINSIC psimd_u16 psimd_load_u16(const void* address) { + return *((const psimd_u16*) address); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load_s32(const void* address) { + return *((const psimd_s32*) address); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load_u32(const void* address) { + return *((const psimd_u32*) address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_f32(const void* address) { + return *((const psimd_f32*) address); + } + + PSIMD_INTRINSIC psimd_s8 psimd_load_splat_s8(const void* address) { + return psimd_splat_s8(*((const int8_t*) address)); + } + + PSIMD_INTRINSIC psimd_u8 psimd_load_splat_u8(const void* address) { + return psimd_splat_u8(*((const uint8_t*) address)); + } + + PSIMD_INTRINSIC psimd_s16 psimd_load_splat_s16(const void* address) { + return psimd_splat_s16(*((const int16_t*) address)); + } + + PSIMD_INTRINSIC psimd_u16 psimd_load_splat_u16(const void* address) { + return psimd_splat_u16(*((const uint16_t*) address)); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load_splat_s32(const void* address) { + return psimd_splat_s32(*((const int32_t*) address)); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load_splat_u32(const void* address) { + return psimd_splat_u32(*((const uint32_t*) address)); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_splat_f32(const void* address) { + return psimd_splat_f32(*((const float*) address)); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load1_s32(const void* address) { + return (psimd_s32) { *((const int32_t*) address), 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load1_u32(const void* address) { + return (psimd_u32) { *((const uint32_t*) address), 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_f32(const void* address) { + return (psimd_f32) { *((const float*) address), 0.0f, 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load2_s32(const void* address) { + const int32_t* address_s32 = (const int32_t*) address; + return (psimd_s32) { address_s32[0], address_s32[1], 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load2_u32(const void* address) { + const uint32_t* address_u32 = (const uint32_t*) address; + return (psimd_u32) { address_u32[0], address_u32[1], 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[1], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load3_s32(const void* address) { + const int32_t* address_s32 = (const int32_t*) address; + return (psimd_s32) { address_s32[0], address_s32[1], address_s32[2], 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load3_u32(const void* address) { + const uint32_t* address_u32 = (const uint32_t*) address; + return (psimd_u32) { address_u32[0], address_u32[1], address_u32[2], 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[1], address_f32[2], 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load4_s32(const void* address) { + return psimd_load_s32(address); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load4_u32(const void* address) { + return psimd_load_u32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_f32(const void* address) { + return psimd_load_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_stride2_f32(const void* address) { + const psimd_f32 v0x1x = psimd_load_f32(address); + const psimd_f32 vx2x3 = psimd_load_f32((const float*) 
address + 3); + #if defined(__clang__) + return __builtin_shufflevector(v0x1x, vx2x3, 0, 2, 5, 7); + #else + return __builtin_shuffle(v0x1x, vx2x3, (psimd_s32) { 0, 2, 5, 7 }); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_stride2_f32(const void* address) { + return psimd_load_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_stride2_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[2], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_stride2_f32(const void* address) { + const psimd_f32 v0x1x = psimd_load_f32(address); + const psimd_f32 v2zzz = psimd_load1_f32((const float*) address + 2); + #if defined(__clang__) + return __builtin_shufflevector(v0x1x, v2zzz, 0, 2, 4, 6); + #else + return __builtin_shuffle(v0x1x, v2zzz, (psimd_s32) { 0, 2, 4, 6 }); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_stride2_f32(const void* address) { + return psimd_load_stride2_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_stride_f32(const void* address, size_t stride) { + const float* address0_f32 = (const float*) address; + const float* address1_f32 = address0_f32 + stride; + const float* address2_f32 = address1_f32 + stride; + const float* address3_f32 = address2_f32 + stride; + return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, *address3_f32 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_stride_f32(const void* address, size_t stride) { + return psimd_load1_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_stride_f32(const void* address, size_t stride) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[stride], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_stride_f32(const void* address, size_t stride) { + const float* address0_f32 = (const float*) address; + const float* address1_f32 = address0_f32 + stride; + const float* address2_f32 = address1_f32 + stride; + return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_stride_f32(const void* address, size_t stride) { + return psimd_load_stride_f32(address, stride); + } + + /* Store vector */ + PSIMD_INTRINSIC void psimd_store_s8(void* address, psimd_s8 value) { + *((psimd_s8*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u8(void* address, psimd_u8 value) { + *((psimd_u8*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_s16(void* address, psimd_s16 value) { + *((psimd_s16*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u16(void* address, psimd_u16 value) { + *((psimd_u16*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_s32(void* address, psimd_s32 value) { + *((psimd_s32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u32(void* address, psimd_u32 value) { + *((psimd_u32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_f32(void* address, psimd_f32 value) { + *((psimd_f32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store1_s32(void* address, psimd_s32 value) { + *((int32_t*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store1_u32(void* address, psimd_u32 value) { + *((uint32_t*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store1_f32(void* address, psimd_f32 value) { + *((float*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store2_s32(void* address, psimd_s32 value) { + int32_t* address_s32 = (int32_t*) address; + address_s32[0] = value[0]; + address_s32[1] = 
value[1]; + } + + PSIMD_INTRINSIC void psimd_store2_u32(void* address, psimd_u32 value) { + uint32_t* address_u32 = (uint32_t*) address; + address_u32[0] = value[0]; + address_u32[1] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store2_f32(void* address, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[1] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store3_s32(void* address, psimd_s32 value) { + int32_t* address_s32 = (int32_t*) address; + address_s32[0] = value[0]; + address_s32[1] = value[1]; + address_s32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store3_u32(void* address, psimd_u32 value) { + uint32_t* address_u32 = (uint32_t*) address; + address_u32[0] = value[0]; + address_u32[1] = value[1]; + address_u32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store3_f32(void* address, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[1] = value[1]; + address_f32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store4_s32(void* address, psimd_s32 value) { + psimd_store_s32(address, value); + } + + PSIMD_INTRINSIC void psimd_store4_u32(void* address, psimd_u32 value) { + psimd_store_u32(address, value); + } + + PSIMD_INTRINSIC void psimd_store4_f32(void* address, psimd_f32 value) { + psimd_store_f32(address, value); + } + + PSIMD_INTRINSIC void psimd_store_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address0_f32 = (float*) address; + float* address1_f32 = address0_f32 + stride; + float* address2_f32 = address1_f32 + stride; + float* address3_f32 = address2_f32 + stride; + *address0_f32 = value[0]; + *address1_f32 = value[1]; + *address2_f32 = value[2]; + *address3_f32 = value[3]; + } + + PSIMD_INTRINSIC void psimd_store1_stride_f32(void* address, size_t stride, psimd_f32 value) { + psimd_store1_f32(address, value); + } + + PSIMD_INTRINSIC void psimd_store2_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[stride] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store3_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address0_f32 = (float*) address; + float* address1_f32 = address0_f32 + stride; + float* address2_f32 = address1_f32 + stride; + *address0_f32 = value[0]; + *address1_f32 = value[1]; + *address2_f32 = value[2]; + } + + /* Vector addition */ + PSIMD_INTRINSIC psimd_s8 psimd_add_s8(psimd_s8 a, psimd_s8 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_add_u8(psimd_u8 a, psimd_u8 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_add_s16(psimd_s16 a, psimd_s16 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u16 psimd_add_u16(psimd_u16 a, psimd_u16 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_add_s32(psimd_s32 a, psimd_s32 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_add_u32(psimd_u32 a, psimd_u32 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_add_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vaddq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a + b; + #endif + } + + /* Vector subtraction */ + PSIMD_INTRINSIC psimd_s8 psimd_sub_s8(psimd_s8 a, psimd_s8 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_sub_u8(psimd_u8 a, psimd_u8 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_sub_s16(psimd_s16 a, psimd_s16 b) { + return a - b; + } + + 
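+ /*
+  * Editorial illustration (not part of the original psimd.h): a sketch of how these
+  * portable 128-bit vector types are typically consumed. The helper name is hypothetical;
+  * it adds two float arrays four lanes at a time using psimd_load_f32, psimd_add_f32 and
+  * psimd_store_f32 defined above, and assumes the element count n is a multiple of 4.
+  */
+ PSIMD_INTRINSIC void psimd_example_add_arrays_f32(const float* a, const float* b, float* sum, size_t n) {
+   size_t i;
+   for (i = 0; i < n; i += 4) {
+     const psimd_f32 va = psimd_load_f32(a + i);      /* load 4 consecutive floats from a */
+     const psimd_f32 vb = psimd_load_f32(b + i);      /* load 4 consecutive floats from b */
+     psimd_store_f32(sum + i, psimd_add_f32(va, vb)); /* lane-wise add, store 4 results */
+   }
+ }
+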
PSIMD_INTRINSIC psimd_u16 psimd_sub_u16(psimd_u16 a, psimd_u16 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_sub_s32(psimd_s32 a, psimd_s32 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_sub_u32(psimd_u32 a, psimd_u32 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_sub_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vsubq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a - b; + #endif + } + + /* Vector multiplication */ + PSIMD_INTRINSIC psimd_s8 psimd_mul_s8(psimd_s8 a, psimd_s8 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_mul_u8(psimd_u8 a, psimd_u8 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_mul_s16(psimd_s16 a, psimd_s16 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u16 psimd_mul_u16(psimd_u16 a, psimd_u16 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_mul_s32(psimd_s32 a, psimd_s32 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_mul_u32(psimd_u32 a, psimd_u32 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_mul_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vmulq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a * b; + #endif + } + + /* Quasi-Fused Multiply-Add */ + PSIMD_INTRINSIC psimd_f32 psimd_qfma_f32(psimd_f32 a, psimd_f32 b, psimd_f32 c) { + #if defined(__aarch64__) || defined(__ARM_NEON__) && defined(__ARM_FEATURE_FMA) + return (psimd_f32) vfmaq_f32((float32x4_t) a, (float32x4_t) b, (float32x4_t) c); + #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA__) + return (psimd_f32) _mm_fmadd_ps((__m128) b, (__m128) c, (__m128) a); + #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA4__) + return (psimd_f32) _mm_macc_ps((__m128) b, (__m128) c, (__m128) a); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) && PSIMD_ENABLE_WASM_QFMA + return (psimd_f32) __builtin_wasm_qfma_f32x4(a, b, c); + #else + return a + b * c; + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_div_f32(psimd_f32 a, psimd_f32 b) { + return a / b; + } + + /* Vector and */ + PSIMD_INTRINSIC psimd_f32 psimd_andmask_f32(psimd_s32 mask, psimd_f32 v) { + return (psimd_f32) (mask & (psimd_s32) v); + } + + /* Vector and-not */ + PSIMD_INTRINSIC psimd_f32 psimd_andnotmask_f32(psimd_s32 mask, psimd_f32 v) { + return (psimd_f32) (~mask & (psimd_s32) v); + } + + /* Vector blend */ + PSIMD_INTRINSIC psimd_s8 psimd_blend_s8(psimd_s8 mask, psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vbslq_s8((uint8x16_t) mask, (int8x16_t) a, (int8x16_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s8) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_blend_u8(psimd_s8 mask, psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vbslq_u8((uint8x16_t) mask, (uint8x16_t) a, (uint8x16_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u8) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u8) ((mask & (psimd_s8) a) | (~mask & (psimd_s8) b)); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_blend_s16(psimd_s16 mask, psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) 
|| defined(__ARM_NEON) + return (psimd_s16) vbslq_s16((uint16x8_t) mask, (int16x8_t) a, (int16x8_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s16) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_blend_u16(psimd_s16 mask, psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vbslq_u16((uint16x8_t) mask, (uint16x8_t) a, (uint16x8_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u16) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u16) ((mask & (psimd_s16) a) | (~mask & (psimd_s16) b)); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_blend_s32(psimd_s32 mask, psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vbslq_s32((uint32x4_t) mask, (int32x4_t) a, (int32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s32) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_blend_u32(psimd_s32 mask, psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vbslq_u32((uint32x4_t) mask, (uint32x4_t) a, (uint32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u32) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b)); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_blend_f32(psimd_s32 mask, psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vbslq_f32((uint32x4_t) mask, (float32x4_t) a, (float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_f32) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_f32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b)); + #endif + } + + /* Vector blend on sign */ + PSIMD_INTRINSIC psimd_s8 psimd_signblend_s8(psimd_s8 x, psimd_s8 a, psimd_s8 b) { + return psimd_blend_s8(x >> psimd_splat_s8(7), a, b); + } + + PSIMD_INTRINSIC psimd_u8 psimd_signblend_u8(psimd_s8 x, psimd_u8 a, psimd_u8 b) { + return psimd_blend_u8((x >> psimd_splat_s8(7)), a, b); + } + + PSIMD_INTRINSIC psimd_s16 psimd_signblend_s16(psimd_s16 x, psimd_s16 a, psimd_s16 b) { + return psimd_blend_s16(x >> psimd_splat_s16(15), a, b); + } + + PSIMD_INTRINSIC psimd_u16 psimd_signblend_u16(psimd_s16 x, psimd_u16 a, psimd_u16 b) { + return psimd_blend_u16((x >> psimd_splat_s16(15)), a, b); + } + + PSIMD_INTRINSIC psimd_s32 psimd_signblend_s32(psimd_s32 x, psimd_s32 a, psimd_s32 b) { + return psimd_blend_s32(x >> psimd_splat_s32(31), a, b); + } + + PSIMD_INTRINSIC psimd_u32 psimd_signblend_u32(psimd_s32 x, psimd_u32 a, psimd_u32 b) { + return psimd_blend_u32((x >> psimd_splat_s32(31)), a, b); + } + + PSIMD_INTRINSIC psimd_f32 psimd_signblend_f32(psimd_f32 x, psimd_f32 a, psimd_f32 b) { + const psimd_s32 mask = (psimd_s32) x >> psimd_splat_s32(31); + return psimd_blend_f32(mask, a, b); + } + + /* Vector absolute value */ + PSIMD_INTRINSIC psimd_f32 psimd_abs_f32(psimd_f32 v) { + const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f); + return (psimd_f32) ((psimd_s32) v & ~mask); + } + + /* Vector negation */ + PSIMD_INTRINSIC psimd_f32 psimd_neg_f32(psimd_f32 v) { + const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f); + 
return (psimd_f32) ((psimd_s32) v ^ mask); + } + + /* Vector maximum */ + PSIMD_INTRINSIC psimd_s8 psimd_max_s8(psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vmaxq_s8((int8x16_t) a, (int8x16_t) b); + #else + return psimd_blend_s8(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_max_u8(psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b); + #else + return psimd_blend_u8(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_max_s16(psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s16) vmaxq_s16((int16x8_t) a, (int16x8_t) b); + #else + return psimd_blend_s16(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_max_u16(psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vmaxq_u16((uint16x8_t) a, (uint16x8_t) b); + #else + return psimd_blend_u16(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_max_s32(psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b); + #else + return psimd_blend_s32(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_max_u32(psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vmaxq_u32((uint32x4_t) a, (uint32x4_t) b); + #else + return psimd_blend_u32(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_max_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vmaxq_f32((float32x4_t) a, (float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return __builtin_wasm_max_f32x4(a, b); + #else + return psimd_blend_f32(a > b, a, b); + #endif + } + + /* Vector minimum */ + PSIMD_INTRINSIC psimd_s8 psimd_min_s8(psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vminq_s8((int8x16_t) a, (int8x16_t) b); + #else + return psimd_blend_s8(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_min_u8(psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vminq_u8((uint8x16_t) a, (uint8x16_t) b); + #else + return psimd_blend_u8(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_min_s16(psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s16) vminq_s16((int16x8_t) a, (int16x8_t) b); + #else + return psimd_blend_s16(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_min_u16(psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vminq_u16((uint16x8_t) a, (uint16x8_t) b); + #else + return psimd_blend_u16(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_min_s32(psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b); + #else + return psimd_blend_s32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_min_u32(psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vminq_u32((uint32x4_t) a, (uint32x4_t) b); + #else + return psimd_blend_u32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_min_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vminq_f32((float32x4_t) a, 
(float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return __builtin_wasm_min_f32x4(a, b); + #else + return psimd_blend_f32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_cvt_s32_f32(psimd_s32 v) { + #if defined(__clang__) + return __builtin_convertvector(v, psimd_f32); + #elif defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vcvtq_f32_s32((int32x4_t) v); + #elif defined(__SSE2__) + return (psimd_f32) _mm_cvtepi32_ps((__m128i) v); + #else + return (psimd_f32) { (float) v[0], (float) v[1], (float) v[2], (float) v[3] }; + #endif + } + + /* Broadcast vector element */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 0, 0, 0, 0); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 1, 1, 1, 1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 2, 2, 2, 2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 3, 3, 3, 3); + } + #else + PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 0, 0, 0, 0 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 1, 1, 1, 1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 2, 2, 2, 2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 3, 3, 3 }); + } + #endif + + /* Reversal of vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) { + return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) { + return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) { + return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) { + return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + #else + PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) { + return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) { + return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) { + return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) { + return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_f32 
psimd_reverse_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + #endif + + /* Interleaving of vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + } + + PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + #endif + + /* Concatenation of low/high vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 
b) { + return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + #endif + + /* Concatenation of even/odd vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shufflevector(a, b, + 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14); + } + + PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shufflevector(a, b, + 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shufflevector(a, b, + 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) { + return 
__builtin_shufflevector(a, b, + 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 }); + } + + PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_u32 
psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + #endif + + /* Vector reduce */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, 0, 1); + return temp + __builtin_shufflevector(temp, temp, 1, 0, 3, 2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1)); + return psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2)); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1)); + return psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2)); + } + + PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, -1, -1); + const psimd_f32 result = temp + __builtin_shufflevector(temp, temp, 1, -1, -1, -1); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1)); + const psimd_f32 result = psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1)); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1)); + const psimd_f32 result = psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1)); + return result[0]; + } + #else + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }); + return temp + __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 })); + return psimd_max_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 })); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 })); + return psimd_min_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 })); + } + + PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_sum_f32(v); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_max_f32(v); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_min_f32(v); + return result[0]; + } + #endif +#endif + +#endif /* PSIMD_H */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/pthreadpool.h b/env-llmeval/lib/python3.10/site-packages/torch/include/pthreadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..953ccc4cc24070aa4897fabc081cba466e34170a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/pthreadpool.h @@ -0,0 +1,2555 @@ +#ifndef PTHREADPOOL_H_ +#define 
PTHREADPOOL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct pthreadpool* pthreadpool_t;
+
+typedef void (*pthreadpool_task_1d_t)(void*, size_t);
+typedef void (*pthreadpool_task_1d_with_thread_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_task_2d_with_thread_t)(void*, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_1d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_1d_with_thread_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+
+typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t);
+typedef void (*pthreadpool_task_2d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t);
+
+typedef void (*pthreadpool_task_2d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_task_3d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t);
+
+
+/**
+ * Disable support for denormalized numbers to the maximum extent possible for
+ * the duration of the computation.
+ *
+ * Handling denormalized floating-point numbers is often implemented in
+ * microcode, and incurs significant performance degradation. This hint
+ * instructs the thread pool to disable support for denormalized numbers before
+ * running the computation by manipulating architecture-specific control
+ * registers, and restore the initial value of control registers after the
+ * computation is complete. The thread pool temporarily disables denormalized
+ * numbers on all threads involved in the computation (i.e. the caller threads,
+ * and potentially worker threads).
+
+ * Disabling denormalized numbers may have a small negative effect on results'
+ * accuracy.
As various architectures differ in capabilities to control + * processing of denormalized numbers, using this flag may also hurt results' + * reproducibility across different instruction set architectures. + */ +#define PTHREADPOOL_FLAG_DISABLE_DENORMALS 0x00000001 + +/** + * Yield worker threads to the system scheduler after the operation is finished. + * + * Force workers to use kernel wait (instead of active spin-wait by default) for + * new commands after this command is processed. This flag affects only the + * immediate next operation on this thread pool. To make the thread pool always + * use kernel wait, pass this flag to all parallelization functions. + */ +#define PTHREADPOOL_FLAG_YIELD_WORKERS 0x00000002 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Create a thread pool with the specified number of threads. + * + * @param threads_count the number of threads in the thread pool. + * A value of 0 has special interpretation: it creates a thread pool with as + * many threads as there are logical processors in the system. + * + * @returns A pointer to an opaque thread pool object if the call is + * successful, or NULL pointer if the call failed. + */ +pthreadpool_t pthreadpool_create(size_t threads_count); + +/** + * Query the number of threads in a thread pool. + * + * @param threadpool the thread pool to query. + * + * @returns The number of threads in the thread pool. + */ +size_t pthreadpool_get_threads_count(pthreadpool_t threadpool); + +/** + * Process items on a 1D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * function(context, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. The + * specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d( + pthreadpool_t threadpool, + pthreadpool_task_1d_t function, + void* context, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * function(context, thread_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. The + * specified function will be called once for each item. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_thread_t function, + void* context, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range; i++) + * function(context, uarch_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range the number of items on the 1D grid to process. + * The specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid with specified maximum tile size. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i += tile) + * function(context, i, min(range - i, tile)); + * + * When the call returns, all items have been processed and the thread pool is + * ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, + * the calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. + * @param tile the maximum number of items on the 1D grid to process in + * one function call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_1d_tile_1d_t function, + void* context, + size_t range, + size_t tile, + uint32_t flags); + +/** + * Process items on a 2D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * function(context, i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d( + pthreadpool_t threadpool, + pthreadpool_task_2d_t function, + void* context, + size_t range_i, + size_t range_j, + uint32_t flags); + +/** + * Process items on a 2D grid passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * function(context, thread_index, i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_2d_with_thread_t function, + void* context, + size_t range_i, + size_t range_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function and passing + * along the current thread id. 
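+ *
+ * For reference, a minimal usage sketch of the plain
+ * pthreadpool_parallelize_2d_tile_1d declared above. The scale_rows task,
+ * the scale_context structure and the data/rows/cols values are assumptions
+ * made for illustration only:
+ *
+ *   struct scale_context { float* data; size_t cols; float factor; };
+ *
+ *   static void scale_rows(void* context, size_t i, size_t j, size_t tile_j) {
+ *     struct scale_context* ctx = (struct scale_context*) context;
+ *     for (size_t jj = j; jj < j + tile_j; jj++)
+ *       ctx->data[i * ctx->cols + jj] *= ctx->factor;
+ *   }
+ *
+ *   struct scale_context ctx = { data, cols, 0.5f };
+ *   pthreadpool_t pool = pthreadpool_create(0);
+ *   pthreadpool_parallelize_2d_tile_1d(pool, scale_rows, &ctx,
+ *     rows, cols, 64, 0);
+ *   pthreadpool_destroy(pool);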
+ * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, thread_index, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d_with_uarch_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_with_id_with_thread_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along each + * grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i += tile_i) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, i, j, + * min(range_i - i, tile_i), min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the first dimension of + * the 2D grid to process in one function call. 
+ * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along each + * grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i += tile_i) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, i, j, + * min(range_i - i, tile_i), min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, + * cpuinfo initialization failed, or index returned + * by cpuinfo_get_current_uarch_index() exceeds + * the max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected + * by the specified function. If the index returned + * by cpuinfo_get_current_uarch_index() exceeds this + * value, default_uarch_index will be used instead. + * default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 2D grid. + * @param range_j the number of items to process along the second + * dimension of the 2D grid. + * @param tile_j the maximum number of items along the first + * dimension of the 2D grid to process in one function call. + * @param tile_j the maximum number of items along the second + * dimension of the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 3D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * function(context, i, j, k); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. 
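+ *
+ * A usage sketch for pthreadpool_parallelize_2d_tile_2d declared above; the
+ * transpose_tile task, ctx, rows and cols are illustrative assumptions:
+ *
+ *   static void transpose_tile(void* context, size_t i, size_t j,
+ *                              size_t tile_i, size_t tile_j) {
+ *     // Process the tile_i x tile_j block whose top-left corner is (i, j).
+ *   }
+ *
+ *   pthreadpool_parallelize_2d_tile_2d(pool, transpose_tile, &ctx,
+ *     rows, cols, 32, 32, 0);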
+ * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d( + pthreadpool_t threadpool, + pthreadpool_task_3d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension and passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, thread_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. 
+ * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_thread_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function and passing + * along the current thread id. 
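+ *
+ * The microarchitecture-aware variants are typically used to pick one entry
+ * from a caller-maintained kernel table. A sketch with the simplest such
+ * variant, pthreadpool_parallelize_1d_with_uarch declared earlier; the
+ * uarch_context structure and both kernels are illustrative assumptions:
+ *
+ *   struct uarch_context {
+ *     void (*kernel[2])(float* row, size_t length);
+ *     float** rows;
+ *     size_t length;
+ *   };
+ *
+ *   static void run_row(void* context, uint32_t uarch_index, size_t i) {
+ *     struct uarch_context* ctx = (struct uarch_context*) context;
+ *     ctx->kernel[uarch_index](ctx->rows[i], ctx->length);
+ *   }
+ *
+ *   // default_uarch_index = 0 (generic kernel), max_uarch_index = 1 because
+ *   // the table has two entries.
+ *   pthreadpool_parallelize_1d_with_uarch(pool, run_row, &ctx, 0, 1,
+ *     row_count, 0);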
+ * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, thread_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_uarch_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_id_with_thread_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. 
+ * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_j the maximum number of items along the second + * dimension of the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 4D grid. 
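+ *
+ * A usage sketch for pthreadpool_parallelize_3d_tile_2d declared above,
+ * e.g. filtering an image per channel in 8x8 tiles; blur_tile, image,
+ * channels, height and width are illustrative assumptions:
+ *
+ *   // Called once per (channel, y-tile, x-tile); (j, k) is the tile origin
+ *   // and tile_j x tile_k is the clamped tile extent.
+ *   static void blur_tile(void* context, size_t i, size_t j, size_t k,
+ *                         size_t tile_j, size_t tile_k);
+ *
+ *   pthreadpool_parallelize_3d_tile_2d(pool, blur_tile, &image,
+ *     channels, height, width, 8, 8, 0);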
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * function(context, i, j, k, l); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d( + pthreadpool_t threadpool, + pthreadpool_task_4d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, i, j, k, l, min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions. 
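+ *
+ * Note that a NULL thread pool is accepted by every parallelization function
+ * and falls back to serial execution on the calling thread; a sketch
+ * (process_element, ctx and the ranges are illustrative assumptions):
+ *
+ *   pthreadpool_parallelize_4d(NULL, process_element, &ctx,
+ *     batch, channels, height, width, 0);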
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, uarch_index, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. 
If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 4D grid. + * @param range_j the number of items to process along the second + * dimension of the 4D grid. + * @param range_k the number of items to process along the third + * dimension of the 4D grid. + * @param range_l the number of items to process along the fourth + * dimension of the 4D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth + * dimension of the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 5D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * function(context, i, j, k, l, m); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d( + pthreadpool_t threadpool, + pthreadpool_task_5d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + uint32_t flags); + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last grid dimension. 
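+ *
+ * The flags argument accepts a bitwise combination of the flags defined above;
+ * a sketch that runs the doubly-tiled 4D variant declared above with denormal
+ * support disabled for the duration of the call (conv_tile, ctx and the ranges
+ * are illustrative assumptions):
+ *
+ *   pthreadpool_parallelize_4d_tile_2d(pool, conv_tile, &ctx,
+ *     batch, out_channels, out_height, out_width,
+ *     4, 16,
+ *     PTHREADPOOL_FLAG_DISABLE_DENORMALS);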
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * function(context, i, j, k, l, m, min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_5d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_m, + uint32_t flags); + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * for (size_t m = 0; m < range_m; m += tile_m) + * function(context, i, j, k, l, m, + * min(range_l - l, tile_l), min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 5D grid to process in one function call. 
+ * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_5d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_l, + size_t tile_m, + uint32_t flags); + +/** + * Process items on a 6D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n++) + * function(context, i, j, k, l, m, n); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d( + pthreadpool_t threadpool, + pthreadpool_task_6d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + uint32_t flags); + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n += tile_n) + * function(context, i, j, k, l, m, n, min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. 
+ * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags); + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * for (size_t n = 0; n < range_n; n += tile_n) + * function(context, i, j, k, l, m, n, + * min(range_m - m, tile_m), min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 6D grid to process in one function call. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. 
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_6d_tile_2d(
+    pthreadpool_t threadpool,
+    pthreadpool_task_6d_tile_2d_t function,
+    void* context,
+    size_t range_i,
+    size_t range_j,
+    size_t range_k,
+    size_t range_l,
+    size_t range_m,
+    size_t range_n,
+    size_t tile_m,
+    size_t tile_n,
+    uint32_t flags);
+
+/**
+ * Terminates threads in the thread pool and releases associated resources.
+ *
+ * @warning Accessing the thread pool after a call to this function constitutes
+ * undefined behaviour and may cause data corruption.
+ *
+ * @param[in,out] threadpool The thread pool to destroy.
+ */
+void pthreadpool_destroy(pthreadpool_t threadpool);
+
+#ifndef PTHREADPOOL_NO_DEPRECATED_API
+
+/* Legacy API for compatibility with pre-existing users (e.g. NNPACK) */
+#if defined(__GNUC__)
+  #define PTHREADPOOL_DEPRECATED __attribute__((__deprecated__))
+#else
+  #define PTHREADPOOL_DEPRECATED
+#endif
+
+typedef void (*pthreadpool_function_1d_t)(void*, size_t);
+typedef void (*pthreadpool_function_1d_tiled_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_function_2d_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_function_2d_tiled_t)(void*, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_function_3d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t);
+typedef void (*pthreadpool_function_4d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t);
+
+void pthreadpool_compute_1d(
+    pthreadpool_t threadpool,
+    pthreadpool_function_1d_t function,
+    void* argument,
+    size_t range) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_1d_tiled(
+    pthreadpool_t threadpool,
+    pthreadpool_function_1d_tiled_t function,
+    void* argument,
+    size_t range,
+    size_t tile) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_2d(
+    pthreadpool_t threadpool,
+    pthreadpool_function_2d_t function,
+    void* argument,
+    size_t range_i,
+    size_t range_j) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_2d_tiled(
+    pthreadpool_t threadpool,
+    pthreadpool_function_2d_tiled_t function,
+    void* argument,
+    size_t range_i,
+    size_t range_j,
+    size_t tile_i,
+    size_t tile_j) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_3d_tiled(
+    pthreadpool_t threadpool,
+    pthreadpool_function_3d_tiled_t function,
+    void* argument,
+    size_t range_i,
+    size_t range_j,
+    size_t range_k,
+    size_t tile_i,
+    size_t tile_j,
+    size_t tile_k) PTHREADPOOL_DEPRECATED;
+
+void pthreadpool_compute_4d_tiled(
+    pthreadpool_t threadpool,
+    pthreadpool_function_4d_tiled_t function,
+    void* argument,
+    size_t range_i,
+    size_t range_j,
+    size_t range_k,
+    size_t range_l,
+    size_t tile_i,
+    size_t tile_j,
+    size_t tile_k,
+    size_t tile_l) PTHREADPOOL_DEPRECATED;
+
+#endif /* PTHREADPOOL_NO_DEPRECATED_API */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#ifdef __cplusplus
+
+namespace libpthreadpool {
+namespace detail {
+namespace {
+
+/* Adapters that cast the opaque context pointer back to the C++ functor and
+ * invoke it; they let the C callback-based entry points above drive arbitrary
+ * functors (including lambdas). */
+template<class T>
+void call_wrapper_1d(void* arg, size_t i) {
+  (*static_cast<const T*>(arg))(i);
+}
+
+template<class T>
+void call_wrapper_1d_tile_1d(void* arg, size_t range_i, size_t tile_i) {
+  (*static_cast<const T*>(arg))(range_i, tile_i);
+}
+
+template<class T>
+void call_wrapper_2d(void* functor, size_t i, size_t j) {
+  (*static_cast<const T*>(functor))(i, j);
+}
+
+template<class T>
+void call_wrapper_2d_tile_1d(void* functor,
+    size_t i, size_t range_j, size_t tile_j)
+{
+  (*static_cast<const T*>(functor))(i, range_j, tile_j);
+}
+
+template<class T>
+void call_wrapper_2d_tile_2d(void* functor,
+    size_t range_i, size_t range_j,
+    size_t tile_i, size_t tile_j)
+{
+  (*static_cast<const T*>(functor))(range_i, range_j, tile_i, tile_j);
+}
+
+template<class T>
+void call_wrapper_3d(void* functor, size_t i, size_t j, size_t k) {
+  (*static_cast<const T*>(functor))(i, j, k);
+}
+
+template<class T>
+void call_wrapper_3d_tile_1d(void* functor,
+    size_t i, size_t j, size_t range_k,
+    size_t tile_k)
+{
+  (*static_cast<const T*>(functor))(i, j, range_k, tile_k);
+}
+
+template<class T>
+void call_wrapper_3d_tile_2d(void* functor,
+    size_t i, size_t range_j, size_t range_k,
+    size_t tile_j, size_t tile_k)
+{
+  (*static_cast<const T*>(functor))(i, range_j, range_k, tile_j, tile_k);
+}
+
+template<class T>
+void call_wrapper_4d(void* functor, size_t i, size_t j, size_t k, size_t l) {
+  (*static_cast<const T*>(functor))(i, j, k, l);
+}
+
+template<class T>
+void call_wrapper_4d_tile_1d(void* functor,
+    size_t i, size_t j, size_t k, size_t range_l,
+    size_t tile_l)
+{
+  (*static_cast<const T*>(functor))(i, j, k, range_l, tile_l);
+}
+
+template<class T>
+void call_wrapper_4d_tile_2d(void* functor,
+    size_t i, size_t j, size_t range_k, size_t range_l,
+    size_t tile_k, size_t tile_l)
+{
+  (*static_cast<const T*>(functor))(i, j, range_k, range_l, tile_k, tile_l);
+}
+
+template<class T>
+void call_wrapper_5d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m) {
+  (*static_cast<const T*>(functor))(i, j, k, l, m);
+}
+
+template<class T>
+void call_wrapper_5d_tile_1d(void* functor,
+    size_t i, size_t j, size_t k, size_t l, size_t range_m,
+    size_t tile_m)
+{
+  (*static_cast<const T*>(functor))(i, j, k, l, range_m, tile_m);
+}
+
+template<class T>
+void call_wrapper_5d_tile_2d(void* functor,
+    size_t i, size_t j, size_t k, size_t range_l, size_t range_m,
+    size_t tile_l, size_t tile_m)
+{
+  (*static_cast<const T*>(functor))(i, j, k, range_l, range_m, tile_l, tile_m);
+}
+
+template<class T>
+void call_wrapper_6d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) {
+  (*static_cast<const T*>(functor))(i, j, k, l, m, n);
+}
+
+template<class T>
+void call_wrapper_6d_tile_1d(void* functor,
+    size_t i, size_t j, size_t k, size_t l, size_t m, size_t range_n,
+    size_t tile_n)
+{
+  (*static_cast<const T*>(functor))(i, j, k, l, m, range_n, tile_n);
+}
+
+template<class T>
+void call_wrapper_6d_tile_2d(void* functor,
+    size_t i, size_t j, size_t k, size_t l, size_t range_m, size_t range_n,
+    size_t tile_m, size_t tile_n)
+{
+  (*static_cast<const T*>(functor))(i, j, k, l, range_m, range_n, tile_m, tile_n);
+}
+
+} /* namespace */
+} /* namespace detail */
+} /* namespace libpthreadpool */
+
+/**
+ * Process items on a 1D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range; i++)
+ *     functor(i);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each item.
+ * @param range the number of items on the 1D grid to process. The
+ * specified functor will be called once for each item.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_1d(
+    pthreadpool_t threadpool,
+    const T& functor,
+    size_t range,
+    uint32_t flags = 0)
+{
+  pthreadpool_parallelize_1d(
+      threadpool,
+      &libpthreadpool::detail::call_wrapper_1d<const T>,
+      const_cast<void*>(static_cast<const void*>(&functor)),
+      range,
+      flags);
+}
+
+/**
+ * Process items on a 1D grid with specified maximum tile size.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range; i += tile)
+ *     functor(i, min(range - i, tile));
+ *
+ * When the call returns, all items have been processed and the thread pool is
+ * ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool,
+ * the calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range the number of items on the 1D grid to process.
+ * @param tile the maximum number of items on the 1D grid to process in
+ * one functor call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_1d_tile_1d(
+    pthreadpool_t threadpool,
+    const T& functor,
+    size_t range,
+    size_t tile,
+    uint32_t flags = 0)
+{
+  pthreadpool_parallelize_1d_tile_1d(
+      threadpool,
+      &libpthreadpool::detail::call_wrapper_1d_tile_1d<const T>,
+      const_cast<void*>(static_cast<const void*>(&functor)),
+      range,
+      tile,
+      flags);
+}
+
+/**
+ * Process items on a 2D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j++)
+ *       functor(i, j);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each item.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_2d(
+    pthreadpool_t threadpool,
+    const T& functor,
+    size_t range_i,
+    size_t range_j,
+    uint32_t flags = 0)
+{
+  pthreadpool_parallelize_2d(
+      threadpool,
+      &libpthreadpool::detail::call_wrapper_2d<const T>,
+      const_cast<void*>(static_cast<const void*>(&functor)),
+      range_i,
+      range_j,
+      flags);
+}
+
+/**
+ * Process items on a 2D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j += tile_j)
+ *       functor(i, j, min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_2d_tile_1d(
+    pthreadpool_t threadpool,
+    const T& functor,
+    size_t range_i,
+    size_t range_j,
+    size_t tile_j,
+    uint32_t flags = 0)
+{
+  pthreadpool_parallelize_2d_tile_1d(
+      threadpool,
+      &libpthreadpool::detail::call_wrapper_2d_tile_1d<const T>,
+      const_cast<void*>(static_cast<const void*>(&functor)),
+      range_i,
+      range_j,
+      tile_j,
+      flags);
+}
+
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i += tile_i)
+ *     for (size_t j = 0; j < range_j; j += tile_j)
+ *       functor(i, j,
+ *         min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_i the maximum number of items along the first dimension of
+ * the 2D grid to process in one functor call.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_2d_tile_2d(
+    pthreadpool_t threadpool,
+    const T& functor,
+    size_t range_i,
+    size_t range_j,
+    size_t tile_i,
+    size_t tile_j,
+    uint32_t flags = 0)
+{
+  pthreadpool_parallelize_2d_tile_2d(
+      threadpool,
+      &libpthreadpool::detail::call_wrapper_2d_tile_2d<const T>,
+      const_cast<void*>(static_cast<const void*>(&functor)),
+      range_i,
+      range_j,
+      tile_i,
+      tile_j,
+      flags);
+}
+
+/**
+ * Process items on a 3D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j++)
+ *       for (size_t k = 0; k < range_k; k++)
+ *         functor(i, j, k);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation.
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + flags); +} + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * functor(i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + tile_k, + flags); +} + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * functor(i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. 
+ * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 3D grid to process in one functor call. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + tile_j, + tile_k, + flags); +} + +/** + * Process items on a 4D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * functor(i, j, k, l); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + flags); +} + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * functor(i, j, k, l, min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + tile_l, + flags); +} + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * functor(i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 4D grid to process in one functor call. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + tile_k, + tile_l, + flags); +} + +/** + * Process items on a 5D grid. 
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * functor(i, j, k, l, m); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + flags); +} + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * functor(i, j, k, l, m, min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one functor call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + tile_m, + flags); +} + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * for (size_t m = 0; m < range_m; m += tile_m) + * functor(i, j, k, l, m, + * min(range_l - l, tile_l), min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 5D grid to process in one functor call. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_l, + size_t tile_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + tile_l, + tile_m, + flags); +} + +/** + * Process items on a 6D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n++) + * functor(i, j, k, l, m, n); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. 
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each item.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template<class T>
+inline void pthreadpool_parallelize_6d(
+    pthreadpool_t threadpool,
+    const T& functor,
+    size_t range_i,
+    size_t range_j,
+    size_t range_k,
+    size_t range_l,
+    size_t range_m,
+    size_t range_n,
+    uint32_t flags = 0)
+{
+  pthreadpool_parallelize_6d(
+      threadpool,
+      &libpthreadpool::detail::call_wrapper_6d<const T>,
+      const_cast<void*>(static_cast<const void*>(&functor)),
+      range_i,
+      range_j,
+      range_k,
+      range_l,
+      range_m,
+      range_n,
+      flags);
+}
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j++)
+ *       for (size_t k = 0; k < range_k; k++)
+ *         for (size_t l = 0; l < range_l; l++)
+ *           for (size_t m = 0; m < range_m; m++)
+ *             for (size_t n = 0; n < range_n; n += tile_n)
+ *               functor(i, j, k, l, m, n, min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param tile_n the maximum number of items along the sixth dimension of
+ * the 6D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags = 0) +{ + pthreadpool_parallelize_6d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_6d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + range_n, + tile_n, + flags); +} + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * for (size_t n = 0; n < range_n; n += tile_n) + * functor(i, j, k, l, m, n, + * min(range_m - m, tile_m), min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 6D grid to process in one functor call. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_6d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_m, + size_t tile_n, + uint32_t flags = 0) +{ + pthreadpool_parallelize_6d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_6d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + range_n, + tile_m, + tile_n, + flags); +} + +#endif /* __cplusplus */ + +#endif /* PTHREADPOOL_H_ */ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack.h b/env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..591fa68eba5a3c8a6b22c12c4fa6efbefd098b84 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) Facebook, Inc. 
and its affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Status code for any QNNPACK function call. + */ +enum qnnp_status { + /** The call succeeded, and all output arguments now contain valid data. */ + qnnp_status_success = 0, + qnnp_status_uninitialized = 1, + qnnp_status_invalid_parameter = 2, + qnnp_status_unsupported_parameter = 3, + qnnp_status_unsupported_hardware = 4, + qnnp_status_out_of_memory = 5, +}; + +enum qnnp_status qnnp_initialize(void); + +enum qnnp_status qnnp_deinitialize(void); + +typedef struct qnnp_operator* qnnp_operator_t; + +enum qnnp_status qnnp_create_convolution2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* convolution); + +enum qnnp_status qnnp_setup_convolution2d_nhwc_q8( + qnnp_operator_t convolution, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_deconvolution2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t adjustment_height, + uint32_t adjustment_width, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* deconvolution); + +enum qnnp_status qnnp_setup_deconvolution2d_nhwc_q8( + qnnp_operator_t deconvolution, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_fully_connected_nc_q8( + size_t input_channels, + size_t output_channels, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* fully_connected); + +enum qnnp_status qnnp_setup_fully_connected_nc_q8( + qnnp_operator_t fully_connected, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_global_average_pooling_nwc_q8( + size_t channels, + uint8_t 
input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* global_average_pooling); + +enum qnnp_status qnnp_setup_global_average_pooling_nwc_q8( + qnnp_operator_t global_average_pooling, + size_t batch_size, + size_t width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_average_pooling2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + size_t channels, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* average_pooling); + +enum qnnp_status qnnp_setup_average_pooling2d_nhwc_q8( + qnnp_operator_t average_pooling, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_max_pooling2d_nhwc_u8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* max_pooling); + +enum qnnp_status qnnp_setup_max_pooling2d_nhwc_u8( + qnnp_operator_t max_pooling, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_channel_shuffle_nc_x8( + size_t groups, + size_t group_channels, + uint32_t flags, + qnnp_operator_t* channel_shuffle); + +enum qnnp_status qnnp_setup_channel_shuffle_nc_x8( + qnnp_operator_t channel_shuffle, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_add_nc_q8( + size_t channels, + uint8_t a_zero_point, + float a_scale, + uint8_t b_zero_point, + float b_scale, + uint8_t sum_zero_point, + float sum_scale, + uint8_t sum_min, + uint8_t sum_max, + uint32_t flags, + qnnp_operator_t* add); + +enum qnnp_status qnnp_setup_add_nc_q8( + qnnp_operator_t add, + size_t batch_size, + const uint8_t* a, + size_t a_stride, + const uint8_t* b, + size_t b_stride, + uint8_t* sum, + size_t sum_stride); + +enum qnnp_status qnnp_create_clamp_nc_u8( + size_t channels, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* clamp); + +enum qnnp_status qnnp_setup_clamp_nc_u8( + qnnp_operator_t clamp, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_sigmoid_nc_q8( + size_t channels, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* sigmoid); + +enum qnnp_status qnnp_setup_sigmoid_nc_q8( + qnnp_operator_t sigmoid, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum 
qnnp_status qnnp_create_leaky_relu_nc_q8( + size_t channels, + float negative_slope, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* leaky_relu); + +enum qnnp_status qnnp_setup_leaky_relu_nc_q8( + qnnp_operator_t leaky_relu, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_softargmax_nc_q8( + size_t channels, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + qnnp_operator_t* softargmax); + +enum qnnp_status qnnp_setup_softargmax_nc_q8( + qnnp_operator_t softargmax, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_run_operator( + qnnp_operator_t op, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_delete_operator( + qnnp_operator_t op); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack_func.h b/env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack_func.h new file mode 100644 index 0000000000000000000000000000000000000000..10bbc000192d7e03745e2cf3fb263a9655cde00c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack_func.h @@ -0,0 +1,166 @@ +#pragma once + +#include +#include + +namespace qnnpack { +class PrePackConvWeights final { + public: + PrePackConvWeights( + const pytorch_qnnp_operator_t convolution, + const uint8_t* kernel_zero_points, + const uint8_t* kernel, + const int32_t* bias); + + void* getPackedWeights() const + { + return packed_weights_; + } + + int64_t getOutputChannels() const + { + return output_channels_; + } + + ~PrePackConvWeights() + { + if (packed_weights_ != nullptr) { + free(packed_weights_); + } + } + + PrePackConvWeights() = delete; + PrePackConvWeights(const PrePackConvWeights&) = delete; + PrePackConvWeights& operator=(const PrePackConvWeights&) = delete; + + private: + void* packed_weights_ = nullptr; + int64_t output_channels_; +}; + +class PackBMatrix final { + public: + PackBMatrix( + size_t input_channels, + size_t output_channels, + const uint8_t* kernel_zero_points, + const float* requantization_scale, + const uint8_t* kernel, + const int32_t* bias); + + // This constructor is to be used for dynamic mode + // quantization. In dynamic mode, we dont yet support + // per channel quantization, and paying the cost of + // memory allocation for per channel zero point and + // requant scale will hurt performance. 
+ PackBMatrix( + size_t input_channels, + size_t output_channels, + const uint8_t kernel_zero_point, + const float requantization_scale, + const uint8_t* kernel, + const int32_t* bias); + + void* getPackedWeights() const + { + return packed_weights_; + } + + void unpackWeights( + const uint8_t* kernel_zero_points, + int8_t* kernel + ) const; + + size_t getInputChannels() const + { + return input_channels_; + } + + size_t getOutputChannels() const + { + return output_channels_; + } + + ~PackBMatrix() + { + if (packed_weights_ != nullptr) { + free(packed_weights_); + } + } + + PackBMatrix() = delete; + PackBMatrix(const PackBMatrix&) = delete; + PackBMatrix& operator=(const PackBMatrix&) = delete; + + private: + void* packed_weights_ = nullptr; + size_t input_channels_; + size_t output_channels_; +}; + +enum pytorch_qnnp_status qnnpackLinear( + const size_t batch_size, + const size_t input_channels, + const size_t output_channels, + const uint8_t input_zero_point, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + const uint8_t* input, + const size_t input_stride, + void* packed_weights, + uint8_t* output, + const size_t output_stride, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackConv( + const pytorch_qnnp_operator_t convolution, + void* packed_weights, + const size_t batch_size, + const size_t input_depth, + const size_t input_height, + const size_t input_width, + const uint8_t input_zero_point, + const uint8_t* input, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + uint8_t* output, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackDeConv( + const pytorch_qnnp_operator_t deconvolution, + void* packed_weights, + const size_t batch_size, + const size_t input_height, + const size_t input_width, + const uint8_t input_zero_point, + const uint8_t* input, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + uint8_t* output, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackLinearDynamic( + const size_t batch_size, + const size_t input_channels, + const size_t output_channels, + const uint8_t input_zero_point, + const uint8_t* kernel_zero_points, + const float* dequantization_scales, + const uint8_t* input, + const size_t input_stride, + void* packed_weights, + const float* bias, + float* output, + const size_t output_stride, + pthreadpool_t threadpool); + +} // namespace qnnpack diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/sleef.h b/env-llmeval/lib/python3.10/site-packages/torch/include/sleef.h new file mode 100644 index 0000000000000000000000000000000000000000..de36514f991a5f9b4774b232a1a6350c47c2c74c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/sleef.h @@ -0,0 +1,4459 @@ +// Copyright Naoki Shibata and contributors 2010 - 2020. +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#ifndef __SLEEF_H__ +#define __SLEEF_H__ + +#define SLEEF_VERSION_MAJOR 3 +#define SLEEF_VERSION_MINOR 6 +#define SLEEF_VERSION_PATCHLEVEL 0 + +#include +#include + +#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER) +#define CONST const +#else +#define CONST +#endif + +#if defined(__AVX2__) || defined(__aarch64__) || defined(__arm__) || defined(__powerpc64__) || defined(__zarch__) +#ifndef FP_FAST_FMA +#define FP_FAST_FMA +#endif +#ifndef FP_FAST_FMAF +#define FP_FAST_FMAF +#endif +#endif + +#if defined(_MSC_VER) && !defined(__STDC__) +#define __STDC__ 1 +#endif + +#if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) +#ifdef IMPORT_IS_EXPORT +#define IMPORT __declspec(dllexport) +#else // #ifdef IMPORT_IS_EXPORT +#define IMPORT __declspec(dllimport) +#if (defined(_MSC_VER)) +#pragma comment(lib,"sleef.lib") +#endif // #if (defined(_MSC_VER)) +#endif // #ifdef IMPORT_IS_EXPORT +#else // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) +#define IMPORT +#endif // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) + +#if (defined(__GNUC__) || defined(__CLANG__)) && (defined(__i386__) || defined(__x86_64__)) +#include +#endif + +#if (defined(_MSC_VER)) +#include +#endif + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) +#include +#endif + +#if defined(__ARM_FEATURE_SVE) +#include +#endif + +#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__) +#include +typedef __vector double SLEEF_VECTOR_DOUBLE; +typedef __vector float SLEEF_VECTOR_FLOAT; +typedef __vector int SLEEF_VECTOR_INT; +typedef __vector unsigned int SLEEF_VECTOR_UINT; +typedef __vector long long SLEEF_VECTOR_LONGLONG; +typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG; +#endif + +#if defined(__VX__) && defined(__VEC__) +#ifndef SLEEF_VECINTRIN_H_INCLUDED +#include +#define SLEEF_VECINTRIN_H_INCLUDED +#endif +typedef __vector double SLEEF_VECTOR_DOUBLE; +typedef __vector float SLEEF_VECTOR_FLOAT; +typedef __vector int SLEEF_VECTOR_INT; +typedef __vector unsigned int SLEEF_VECTOR_UINT; +typedef __vector long long SLEEF_VECTOR_LONGLONG; +typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG; +#endif + +// + +#ifndef SLEEF_FP_ILOGB0 +#define SLEEF_FP_ILOGB0 ((int)-2147483648) +#endif + +#ifndef SLEEF_FP_ILOGBNAN +#define SLEEF_FP_ILOGBNAN ((int)2147483647) +#endif + +// + +IMPORT void *Sleef_malloc(size_t z); +IMPORT void Sleef_free(void *ptr); +IMPORT uint64_t Sleef_currentTimeMicros(); + +#if defined(__i386__) || defined(__x86_64__) || defined(_MSC_VER) +IMPORT void Sleef_x86CpuID(int32_t out[4], uint32_t eax, uint32_t ecx); +#endif + +// + +#ifndef Sleef_double2_DEFINED +#define Sleef_double2_DEFINED +typedef struct { + double x, y; +} Sleef_double2; +#endif + +#ifndef Sleef_float2_DEFINED +#define Sleef_float2_DEFINED +typedef struct { + float x, y; +} Sleef_float2; +#endif + +#ifndef Sleef_longdouble2_DEFINED +#define Sleef_longdouble2_DEFINED +typedef struct { + long double x, y; +} Sleef_longdouble2; +#endif + +#if !defined(Sleef_quad_DEFINED) +#define Sleef_quad_DEFINED +#if defined(__SIZEOF_FLOAT128__) || (defined(__linux__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(__PPC64__) && defined(__GNUC__) && 
!defined(__clang__) && __GNUC__ >= 8) +typedef __float128 Sleef_quad; +#define SLEEF_QUAD_C(x) (x ## Q) +//#elif defined(__SIZEOF_LONG_DOUBLE__) && defined(__aarch64__) +//typedef long double Sleef_quad; +//#define SLEEF_QUAD_C(x) (x ## L) +#else +typedef struct { uint64_t x, y; } Sleef_quad; +#endif +#endif + +#if !defined(Sleef_quad2_DEFINED) +#define Sleef_quad2_DEFINED +typedef union { + struct { + Sleef_quad x, y; + }; + Sleef_quad s[2]; +} Sleef_quad2; +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + +IMPORT CONST double Sleef_sin_u35(double); +IMPORT CONST double Sleef_cos_u35(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u35(double); +IMPORT CONST double Sleef_tan_u35(double); +IMPORT CONST double Sleef_asin_u35(double); +IMPORT CONST double Sleef_acos_u35(double); +IMPORT CONST double Sleef_atan_u35(double); +IMPORT CONST double Sleef_atan2_u35(double, double); +IMPORT CONST double Sleef_log_u35(double); +IMPORT CONST double Sleef_cbrt_u35(double); +IMPORT CONST double Sleef_sin_u10(double); +IMPORT CONST double Sleef_cos_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u10(double); +IMPORT CONST double Sleef_tan_u10(double); +IMPORT CONST double Sleef_asin_u10(double); +IMPORT CONST double Sleef_acos_u10(double); +IMPORT CONST double Sleef_atan_u10(double); +IMPORT CONST double Sleef_atan2_u10(double, double); +IMPORT CONST double Sleef_log_u10(double); +IMPORT CONST double Sleef_cbrt_u10(double); +IMPORT CONST double Sleef_exp_u10(double); +IMPORT CONST double Sleef_pow_u10(double, double); +IMPORT CONST double Sleef_sinh_u10(double); +IMPORT CONST double Sleef_cosh_u10(double); +IMPORT CONST double Sleef_tanh_u10(double); +IMPORT CONST double Sleef_sinh_u35(double); +IMPORT CONST double Sleef_cosh_u35(double); +IMPORT CONST double Sleef_tanh_u35(double); +IMPORT CONST double Sleef_asinh_u10(double); +IMPORT CONST double Sleef_acosh_u10(double); +IMPORT CONST double Sleef_atanh_u10(double); +IMPORT CONST double Sleef_exp2_u10(double); +IMPORT CONST double Sleef_exp10_u10(double); +IMPORT CONST double Sleef_exp2_u35(double); +IMPORT CONST double Sleef_exp10_u35(double); +IMPORT CONST double Sleef_expm1_u10(double); +IMPORT CONST double Sleef_log10_u10(double); +IMPORT CONST double Sleef_log2_u10(double); +IMPORT CONST double Sleef_log2_u35(double); +IMPORT CONST double Sleef_log1p_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u05(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u35(double); +IMPORT CONST double Sleef_sinpi_u05(double); +IMPORT CONST double Sleef_cospi_u05(double); +IMPORT CONST double Sleef_ldexp(double, int); +IMPORT CONST int Sleef_ilogb(double); +IMPORT CONST double Sleef_fma(double, double, double); +IMPORT CONST double Sleef_sqrt(double); +IMPORT CONST double Sleef_sqrt_u05(double); +IMPORT CONST double Sleef_sqrt_u35(double); + +IMPORT CONST double Sleef_hypot_u05(double, double); +IMPORT CONST double Sleef_hypot_u35(double, double); + +IMPORT CONST double Sleef_fabs(double); +IMPORT CONST double Sleef_copysign(double, double); +IMPORT CONST double Sleef_fmax(double, double); +IMPORT CONST double Sleef_fmin(double, double); +IMPORT CONST double Sleef_fdim(double, double); +IMPORT CONST double Sleef_trunc(double); +IMPORT CONST double Sleef_floor(double); +IMPORT CONST double Sleef_ceil(double); +IMPORT CONST double Sleef_round(double); +IMPORT CONST double Sleef_rint(double); +IMPORT CONST double Sleef_nextafter(double, double); +IMPORT CONST double Sleef_frfrexp(double); +IMPORT CONST int Sleef_expfrexp(double); +IMPORT CONST 
double Sleef_fmod(double, double); +IMPORT CONST double Sleef_remainder(double, double); +IMPORT CONST Sleef_double2 Sleef_modf(double); + +IMPORT CONST double Sleef_lgamma_u10(double); +IMPORT CONST double Sleef_tgamma_u10(double); +IMPORT CONST double Sleef_erf_u10(double); +IMPORT CONST double Sleef_erfc_u15(double); + +IMPORT CONST float Sleef_sinf_u35(float); +IMPORT CONST float Sleef_cosf_u35(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u35(float); +IMPORT CONST float Sleef_tanf_u35(float); +IMPORT CONST float Sleef_asinf_u35(float); +IMPORT CONST float Sleef_acosf_u35(float); +IMPORT CONST float Sleef_atanf_u35(float); +IMPORT CONST float Sleef_atan2f_u35(float, float); +IMPORT CONST float Sleef_logf_u35(float); +IMPORT CONST float Sleef_cbrtf_u35(float); +IMPORT CONST float Sleef_sinf_u10(float); +IMPORT CONST float Sleef_cosf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u10(float); +IMPORT CONST float Sleef_fastsinf_u3500(float); +IMPORT CONST float Sleef_fastcosf_u3500(float); +IMPORT CONST float Sleef_tanf_u10(float); +IMPORT CONST float Sleef_asinf_u10(float); +IMPORT CONST float Sleef_acosf_u10(float); +IMPORT CONST float Sleef_atanf_u10(float); +IMPORT CONST float Sleef_atan2f_u10(float, float); +IMPORT CONST float Sleef_logf_u10(float); +IMPORT CONST float Sleef_cbrtf_u10(float); +IMPORT CONST float Sleef_expf_u10(float); +IMPORT CONST float Sleef_powf_u10(float, float); +IMPORT CONST float Sleef_fastpowf_u3500(float, float); +IMPORT CONST float Sleef_sinhf_u10(float); +IMPORT CONST float Sleef_coshf_u10(float); +IMPORT CONST float Sleef_tanhf_u10(float); +IMPORT CONST float Sleef_sinhf_u35(float); +IMPORT CONST float Sleef_coshf_u35(float); +IMPORT CONST float Sleef_tanhf_u35(float); +IMPORT CONST float Sleef_asinhf_u10(float); +IMPORT CONST float Sleef_acoshf_u10(float); +IMPORT CONST float Sleef_atanhf_u10(float); +IMPORT CONST float Sleef_exp2f_u10(float); +IMPORT CONST float Sleef_exp10f_u10(float); +IMPORT CONST float Sleef_exp2f_u35(float); +IMPORT CONST float Sleef_exp10f_u35(float); +IMPORT CONST float Sleef_expm1f_u10(float); +IMPORT CONST float Sleef_log10f_u10(float); +IMPORT CONST float Sleef_log2f_u10(float); +IMPORT CONST float Sleef_log2f_u35(float); +IMPORT CONST float Sleef_log1pf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u05(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u35(float); +IMPORT CONST float Sleef_sinpif_u05(float d); +IMPORT CONST float Sleef_cospif_u05(float d); +IMPORT CONST float Sleef_ldexpf(float, int); +IMPORT CONST int Sleef_ilogbf(float); +IMPORT CONST float Sleef_fmaf(float, float, float); +IMPORT CONST float Sleef_sqrtf(float); +IMPORT CONST float Sleef_sqrtf_u05(float); +IMPORT CONST float Sleef_sqrtf_u35(float); + +IMPORT CONST float Sleef_hypotf_u05(float, float); +IMPORT CONST float Sleef_hypotf_u35(float, float); + +IMPORT CONST float Sleef_fabsf(float); +IMPORT CONST float Sleef_copysignf(float, float); +IMPORT CONST float Sleef_fmaxf(float, float); +IMPORT CONST float Sleef_fminf(float, float); +IMPORT CONST float Sleef_fdimf(float, float); +IMPORT CONST float Sleef_truncf(float); +IMPORT CONST float Sleef_floorf(float); +IMPORT CONST float Sleef_ceilf(float); +IMPORT CONST float Sleef_roundf(float); +IMPORT CONST float Sleef_rintf(float); +IMPORT CONST float Sleef_nextafterf(float, float); +IMPORT CONST float Sleef_frfrexpf(float); +IMPORT CONST int Sleef_expfrexpf(float); +IMPORT CONST float Sleef_fmodf(float, float); +IMPORT CONST float Sleef_remainderf(float, float); +IMPORT CONST Sleef_float2 
Sleef_modff(float); + +IMPORT CONST float Sleef_lgammaf_u10(float); +IMPORT CONST float Sleef_tgammaf_u10(float); +IMPORT CONST float Sleef_erff_u10(float); +IMPORT CONST float Sleef_erfcf_u15(float); + +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u05(long double); +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u35(long double); + +#if defined(Sleef_quad2_DEFINED) +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u05(Sleef_quad); +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u35(Sleef_quad); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35(__m128d); 
+IMPORT CONST __m128d Sleef_cinz_tanhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2(__m128d); +IMPORT CONST __m128d Sleef_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d 
Sleef_cinz_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2(__m128d); +IMPORT CONST __m128d Sleef_floord2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2(__m128d); +IMPORT CONST __m128d Sleef_ceild2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2(__m128d); +IMPORT CONST __m128d Sleef_roundd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2(__m128d); +IMPORT CONST __m128d Sleef_rintd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15(__m128d); +IMPORT CONST int Sleef_getIntd2(int); +IMPORT CONST void *Sleef_getPtrd2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10(__m128); +IMPORT 
CONST __m128 Sleef_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35(__m128); +IMPORT CONST __m128 
Sleef_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4(__m128); +IMPORT CONST __m128 Sleef_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4(__m128); +IMPORT CONST __m128 Sleef_floorf4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4(__m128); +IMPORT CONST __m128 Sleef_ceilf4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4(__m128); +IMPORT CONST __m128 Sleef_roundf4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4(__m128); +IMPORT CONST __m128 Sleef_rintf4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15(__m128); +IMPORT CONST int Sleef_getIntf4(int); +IMPORT CONST int Sleef_cinz_getIntf4(int); +IMPORT CONST void *Sleef_getPtrf4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse2(__m128d); +IMPORT CONST __m128d 
Sleef_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d 
Sleef_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 
Sleef_modfd2_sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse2(__m128d); +IMPORT CONST int Sleef_getIntd2_sse2(int); +IMPORT CONST void *Sleef_getPtrd2_sse2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 
Sleef_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse2(__m128, __m128); +IMPORT 
CONST __m128 Sleef_cinz_fminf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse2(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse2(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse2(__m128); +IMPORT CONST int Sleef_getIntf4_sse2(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse2(int); +IMPORT CONST void *Sleef_getPtrf4_sse2(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse4(__m128d); 
+IMPORT CONST __m128d Sleef_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 
Sleef_cinz_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d 
Sleef_cinz_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse4(__m128d); +IMPORT CONST int Sleef_getIntd2_sse4(int); +IMPORT CONST void *Sleef_getPtrd2_sse4(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse4(__m128); 
+IMPORT CONST __m128 Sleef_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse4(__m128); +IMPORT CONST __m128 
Sleef_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse4(__m128); +IMPORT CONST int Sleef_getIntf4_sse4(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse4(int); +IMPORT CONST void *Sleef_getPtrf4_sse4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d 
Sleef_cinz_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4(__m256d); +IMPORT CONST __m256d Sleef_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4(__m256d); +IMPORT 
CONST __m256d Sleef_cinz_sqrtd4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4(__m256d); +IMPORT CONST __m256d Sleef_floord4(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4(__m256d); +IMPORT CONST __m256d Sleef_ceild4(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4(__m256d); +IMPORT CONST __m256d Sleef_roundd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4(__m256d); +IMPORT CONST __m256d Sleef_rintd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15(__m256d); +IMPORT CONST int Sleef_getIntd4(int); +IMPORT CONST void *Sleef_getPtrd4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35(__m256); +IMPORT CONST 
__m256 Sleef_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35(__m256); +IMPORT CONST __m256 
Sleef_cinz_log2f8_u35(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8(__m256); +IMPORT CONST __m256 Sleef_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8(__m256); +IMPORT CONST __m256 Sleef_floorf8(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8(__m256); +IMPORT CONST __m256 Sleef_ceilf8(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8(__m256); +IMPORT CONST __m256 Sleef_roundf8(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8(__m256); +IMPORT CONST __m256 Sleef_rintf8(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8(__m256); +IMPORT CONST __m256 Sleef_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15(__m256); +IMPORT CONST int Sleef_getIntf8(int); +IMPORT CONST int Sleef_cinz_getIntf8(int); +IMPORT CONST void *Sleef_getPtrf8(int); +IMPORT CONST void *Sleef_cinz_getPtrf8(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx(__m256d); +IMPORT CONST 
__m256d Sleef_cinz_sind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10avx(__m256d); 
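/* Editorial annotation -- illustrative only, not part of sleef.h or of this diff.
   A minimal sketch of calling one of the AVX double-precision kernels declared
   above (Sleef_sind4_u10avx, the 1.0-ULP sine variant). Assumes sleef.h and
   libsleef are installed and the file is built with AVX enabled, e.g.
   `cc -mavx demo.c -lsleef`. */
#include <stdio.h>
#include <immintrin.h>
#include <sleef.h>

int main(void) {
    double in[4] = {0.0, 0.5, 1.0, 2.0};
    __m256d x = _mm256_loadu_pd(in);      /* load 4 doubles into a YMM register */
    __m256d s = Sleef_sind4_u10avx(x);    /* vectorized sine, declared above */
    double out[4];
    _mm256_storeu_pd(out, s);
    for (int i = 0; i < 4; i++)
        printf("sin(%g) = %.17g\n", in[i], out[i]);
    return 0;
}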
+IMPORT CONST __m256d Sleef_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4_avx(__m256d); 
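/* Editorial annotation -- illustrative only, not part of sleef.h or of this diff.
   A short sketch of consuming the two-element result struct (Sleef___m256d_2,
   fields x and y) returned by the sincos-style kernels declared above; per
   SLEEF's convention the sine lands in .x and the cosine in .y. Same build
   assumptions as the previous sketch (libsleef linked, AVX enabled). */
#include <stdio.h>
#include <immintrin.h>
#include <sleef.h>

void sincos_demo(void) {
    __m256d x = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);
    Sleef___m256d_2 sc = Sleef_sincosd4_u10avx(x);  /* sc.x = sin(x), sc.y = cos(x) */
    double s[4], c[4];
    _mm256_storeu_pd(s, sc.x);
    _mm256_storeu_pd(c, sc.y);
    for (int i = 0; i < 4; i++)
        printf("sin=%f cos=%f\n", s[i], c[i]);
}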
+IMPORT CONST __m256d Sleef_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4_avx(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4_avx(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15avx(__m256d); +IMPORT CONST int Sleef_getIntd4_avx(int); +IMPORT CONST void *Sleef_getPtrd4_avx(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10avx(__m256, __m256); +IMPORT CONST 
__m256 Sleef_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx(__m256); +IMPORT CONST 
__m256 Sleef_cinz_sqrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8_avx(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8_avx(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15avx(__m256); +IMPORT CONST int Sleef_getIntf8_avx(int); +IMPORT CONST int Sleef_cinz_getIntf8_avx(int); +IMPORT CONST void *Sleef_getPtrf8_avx(int); +IMPORT CONST void *Sleef_cinz_getPtrf8_avx(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35fma4(__m256d, 
__m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d 
Sleef_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_fma4(__m256d, __m256d); +IMPORT 
CONST __m256d Sleef_finz_fmodd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15fma4(__m256d); +IMPORT CONST int Sleef_getIntd4_fma4(int); +IMPORT CONST void *Sleef_getPtrd4_fma4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 
Sleef_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 
Sleef_finz_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_fma4(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_fma4(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15fma4(__m256); +IMPORT CONST int Sleef_getIntf8_fma4(int); +IMPORT CONST int Sleef_finz_getIntf8_fma4(int); +IMPORT CONST void *Sleef_getPtrf8_fma4(int); +IMPORT CONST void *Sleef_finz_getPtrf8_fma4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_cosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d 
Sleef_log1pd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15avx2(__m256d); +IMPORT CONST int Sleef_getIntd4_avx2(int); +IMPORT CONST void *Sleef_getPtrd4_avx2(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx2(__m256); +IMPORT 
CONST __m256 Sleef_finz_tanhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx2(__m256); +IMPORT CONST __m256 
Sleef_finz_truncf8_avx2(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_avx2(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_avx2(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15avx2(__m256); +IMPORT CONST int Sleef_getIntf8_avx2(int); +IMPORT CONST int Sleef_finz_getIntf8_avx2(int); +IMPORT CONST void *Sleef_getPtrf8_avx2(int); +IMPORT CONST void *Sleef_finz_getPtrf8_avx2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log1pd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05avx2128(__m128d); +IMPORT CONST 
Sleef___m128d_2 Sleef_finz_sincospid2_u05avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincospid2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128d Sleef_finz_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_modfd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfcd2_u15avx2128(__m128d); +IMPORT CONST int Sleef_getIntd2_avx2128(int); +IMPORT CONST void *Sleef_getPtrd2_avx2128(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 
Sleef_finz_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log1pf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_avx2128(__m128, __m128); 
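/* Editor's illustrative sketch (not part of the upstream header): one way the
   128-bit AVX2 single-precision entry points declared above could be called.
   The helper name sin_cos_4 and its buffers are hypothetical; it assumes the
   translation unit is built with AVX2 enabled and linked against libsleef.
   SLEEF's sincos variants return a two-member struct; by convention .x holds
   the sines and .y the cosines. */
#include <immintrin.h>
#include <sleef.h>

static void sin_cos_4(const float *angles, float *s, float *c) {
    __m128 a = _mm_loadu_ps(angles);                   /* load 4 packed floats */
    Sleef___m128_2 sc = Sleef_sincosf4_u10avx2128(a);  /* 1.0-ULP sincos, declared above */
    _mm_storeu_ps(s, sc.x);                            /* sine lanes */
    _mm_storeu_ps(c, sc.y);                            /* cosine lanes */
}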
+IMPORT CONST __m128 Sleef_finz_fmaxf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_remainderf4_avx2128(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_modff4_avx2128(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erfcf4_u15avx2128(__m128); +IMPORT CONST int Sleef_getIntf4_avx2128(int); +IMPORT CONST int Sleef_finz_getIntf4_avx2128(int); +IMPORT CONST void *Sleef_getPtrf4_avx2128(int); +IMPORT CONST void *Sleef_finz_getPtrf4_avx2128(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10(__m512d); +IMPORT CONST __m512d 
Sleef_finz_cosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_finz_sincospid8_u35(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8(__m512d); +IMPORT CONST __m512d Sleef_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8(__m512d); +IMPORT CONST __m512d Sleef_floord8(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8(__m512d); +IMPORT CONST __m512d Sleef_ceild8(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8(__m512d); +IMPORT CONST __m512d Sleef_roundd8(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8(__m512d); +IMPORT CONST __m512d Sleef_rintd8(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15(__m512d); +IMPORT CONST int Sleef_getIntd8(int); +IMPORT CONST void *Sleef_getPtrd8(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35(__m512); +IMPORT CONST __m512 
Sleef_finz_sinf16_u35(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16(__m512); +IMPORT CONST __m512 Sleef_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16(__m512); +IMPORT CONST __m512 Sleef_floorf16(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16(__m512); +IMPORT CONST __m512 Sleef_ceilf16(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16(__m512); +IMPORT CONST __m512 Sleef_roundf16(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16(__m512); +IMPORT CONST __m512 Sleef_rintf16(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16(__m512); +IMPORT CONST __m512 Sleef_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16(__m512); +IMPORT CONST __m512 
Sleef_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15(__m512); +IMPORT CONST int Sleef_getIntf16(int); +IMPORT CONST int Sleef_finz_getIntf16(int); +IMPORT CONST void *Sleef_getPtrf16(int); +IMPORT CONST void *Sleef_finz_getPtrf16(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512f(__m512d); 
+IMPORT CONST __m512d Sleef_finz_coshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512f(__m512d); +IMPORT CONST 
__m512d Sleef_finz_sqrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15avx512f(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512f(int); +IMPORT CONST void *Sleef_getPtrd8_avx512f(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 
Sleef_finz_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512f(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST 
__m512 Sleef_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16_avx512f(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16_avx512f(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15avx512f(__m512); +IMPORT CONST int Sleef_getIntf16_avx512f(int); +IMPORT CONST int Sleef_finz_getIntf16_avx512f(int); +IMPORT CONST void *Sleef_getPtrf16_avx512f(int); +IMPORT CONST void *Sleef_finz_getPtrf16_avx512f(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d 
Sleef_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m512d Sleef_cinz_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_cinz_modfd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrd8_avx512fnofma(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512fnofma(__m512, __m512); 
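// --- Illustrative usage sketch (not part of the upstream SLEEF header) ---
// A minimal example, assuming the translation unit is compiled with AVX-512F
// enabled so that __m512 and the _mm512_loadu_ps/_mm512_storeu_ps intrinsics
// from <immintrin.h> are available. It shows the typical calling pattern for
// the 16-lane single-precision entry points declared above: SLEEF's "u10"
// variants are specified as accurate to 1.0 ULP, while the "u35" variants
// relax this to 3.5 ULP in exchange for speed. The helper name below is
// illustrative only; the SLEEF function it calls is declared earlier in this
// file.
static inline void sleef_sinf_batch16(const float *in, float *out, int n) {
  // Process 16 floats per iteration; any remainder (n not a multiple of 16)
  // is left untouched here and would need a separate scalar tail.
  for (int i = 0; i + 16 <= n; i += 16) {
    __m512 x = _mm512_loadu_ps(in + i);           // unaligned load of 16 floats
    __m512 y = Sleef_sinf16_u10avx512fnofma(x);   // vectorized sinf, <= 1.0 ULP error
    _mm512_storeu_ps(out + i, y);                 // store 16 results
  }
}
// The same pattern applies to the double-precision "d8" variants above, which
// operate on __m512d vectors of eight doubles.
// --------------------------------------------------------------------------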
+IMPORT CONST __m512 Sleef_cinz_powf16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_coshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_coshf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fastsinf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fastcosf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fastpowf16_u3500avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acoshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp10f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expm1f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log1pf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u05avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinpif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cospif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512fnofma(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmaf16_avx512fnofma(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512fnofma(__m512); +IMPORT CONST __m512 
Sleef_cinz_sqrtf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sqrtf16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sqrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_hypotf16_u05avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_hypotf16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fabsf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_copysignf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmaxf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fminf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fdimf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_truncf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_floorf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_ceilf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_roundf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_rintf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_nextafterf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_frfrexpf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmodf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_remainderf16_avx512fnofma(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_modff16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_lgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_erff16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_erfcf16_u15avx512fnofma(__m512); +IMPORT CONST int Sleef_getIntf16_avx512fnofma(int); +IMPORT CONST int Sleef_cinz_getIntf16_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrf16_avx512fnofma(int); +IMPORT CONST void *Sleef_cinz_getPtrf16_avx512fnofma(int); +#endif +#ifdef __STDC__ + +#ifndef Sleef_double_2_DEFINED +typedef struct { + double x, y; +} Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +IMPORT CONST double Sleef_sind1_u35purec(double); +IMPORT CONST double Sleef_cinz_sind1_u35purec(double); +IMPORT CONST double 
Sleef_cosd1_u35purec(double); +IMPORT CONST double Sleef_cinz_cosd1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u35purec(double); +IMPORT CONST double Sleef_tand1_u35purec(double); +IMPORT CONST double Sleef_cinz_tand1_u35purec(double); +IMPORT CONST double Sleef_asind1_u35purec(double); +IMPORT CONST double Sleef_cinz_asind1_u35purec(double); +IMPORT CONST double Sleef_acosd1_u35purec(double); +IMPORT CONST double Sleef_cinz_acosd1_u35purec(double); +IMPORT CONST double Sleef_atand1_u35purec(double); +IMPORT CONST double Sleef_cinz_atand1_u35purec(double); +IMPORT CONST double Sleef_atan2d1_u35purec(double, double); +IMPORT CONST double Sleef_cinz_atan2d1_u35purec(double, double); +IMPORT CONST double Sleef_logd1_u35purec(double); +IMPORT CONST double Sleef_cinz_logd1_u35purec(double); +IMPORT CONST double Sleef_cbrtd1_u35purec(double); +IMPORT CONST double Sleef_cinz_cbrtd1_u35purec(double); +IMPORT CONST double Sleef_sind1_u10purec(double); +IMPORT CONST double Sleef_cinz_sind1_u10purec(double); +IMPORT CONST double Sleef_cosd1_u10purec(double); +IMPORT CONST double Sleef_cinz_cosd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u10purec(double); +IMPORT CONST double Sleef_tand1_u10purec(double); +IMPORT CONST double Sleef_cinz_tand1_u10purec(double); +IMPORT CONST double Sleef_asind1_u10purec(double); +IMPORT CONST double Sleef_cinz_asind1_u10purec(double); +IMPORT CONST double Sleef_acosd1_u10purec(double); +IMPORT CONST double Sleef_cinz_acosd1_u10purec(double); +IMPORT CONST double Sleef_atand1_u10purec(double); +IMPORT CONST double Sleef_cinz_atand1_u10purec(double); +IMPORT CONST double Sleef_atan2d1_u10purec(double, double); +IMPORT CONST double Sleef_cinz_atan2d1_u10purec(double, double); +IMPORT CONST double Sleef_logd1_u10purec(double); +IMPORT CONST double Sleef_cinz_logd1_u10purec(double); +IMPORT CONST double Sleef_cbrtd1_u10purec(double); +IMPORT CONST double Sleef_cinz_cbrtd1_u10purec(double); +IMPORT CONST double Sleef_expd1_u10purec(double); +IMPORT CONST double Sleef_cinz_expd1_u10purec(double); +IMPORT CONST double Sleef_powd1_u10purec(double, double); +IMPORT CONST double Sleef_cinz_powd1_u10purec(double, double); +IMPORT CONST double Sleef_sinhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_sinhd1_u10purec(double); +IMPORT CONST double Sleef_coshd1_u10purec(double); +IMPORT CONST double Sleef_cinz_coshd1_u10purec(double); +IMPORT CONST double Sleef_tanhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_tanhd1_u10purec(double); +IMPORT CONST double Sleef_sinhd1_u35purec(double); +IMPORT CONST double Sleef_cinz_sinhd1_u35purec(double); +IMPORT CONST double Sleef_coshd1_u35purec(double); +IMPORT CONST double Sleef_cinz_coshd1_u35purec(double); +IMPORT CONST double Sleef_tanhd1_u35purec(double); +IMPORT CONST double Sleef_cinz_tanhd1_u35purec(double); +IMPORT CONST double Sleef_fastsind1_u3500purec(double); +IMPORT CONST double Sleef_cinz_fastsind1_u3500purec(double); +IMPORT CONST double Sleef_fastcosd1_u3500purec(double); +IMPORT CONST double Sleef_cinz_fastcosd1_u3500purec(double); +IMPORT CONST double Sleef_fastpowd1_u3500purec(double, double); +IMPORT CONST double Sleef_cinz_fastpowd1_u3500purec(double, double); +IMPORT CONST double Sleef_asinhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_asinhd1_u10purec(double); +IMPORT CONST double Sleef_acoshd1_u10purec(double); +IMPORT CONST double 
Sleef_cinz_acoshd1_u10purec(double); +IMPORT CONST double Sleef_atanhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_atanhd1_u10purec(double); +IMPORT CONST double Sleef_exp2d1_u10purec(double); +IMPORT CONST double Sleef_cinz_exp2d1_u10purec(double); +IMPORT CONST double Sleef_exp2d1_u35purec(double); +IMPORT CONST double Sleef_cinz_exp2d1_u35purec(double); +IMPORT CONST double Sleef_exp10d1_u10purec(double); +IMPORT CONST double Sleef_cinz_exp10d1_u10purec(double); +IMPORT CONST double Sleef_exp10d1_u35purec(double); +IMPORT CONST double Sleef_cinz_exp10d1_u35purec(double); +IMPORT CONST double Sleef_expm1d1_u10purec(double); +IMPORT CONST double Sleef_cinz_expm1d1_u10purec(double); +IMPORT CONST double Sleef_log10d1_u10purec(double); +IMPORT CONST double Sleef_cinz_log10d1_u10purec(double); +IMPORT CONST double Sleef_log2d1_u10purec(double); +IMPORT CONST double Sleef_cinz_log2d1_u10purec(double); +IMPORT CONST double Sleef_log2d1_u35purec(double); +IMPORT CONST double Sleef_cinz_log2d1_u35purec(double); +IMPORT CONST double Sleef_log1pd1_u10purec(double); +IMPORT CONST double Sleef_cinz_log1pd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u05purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u35purec(double); +IMPORT CONST double Sleef_sinpid1_u05purec(double); +IMPORT CONST double Sleef_cinz_sinpid1_u05purec(double); +IMPORT CONST double Sleef_cospid1_u05purec(double); +IMPORT CONST double Sleef_cinz_cospid1_u05purec(double); +IMPORT CONST double Sleef_ldexpd1_purec(double, int32_t); +IMPORT CONST double Sleef_cinz_ldexpd1_purec(double, int32_t); +IMPORT CONST int32_t Sleef_ilogbd1_purec(double); +IMPORT CONST int32_t Sleef_cinz_ilogbd1_purec(double); +IMPORT CONST double Sleef_fmad1_purec(double, double, double); +IMPORT CONST double Sleef_cinz_fmad1_purec(double, double, double); +IMPORT CONST double Sleef_sqrtd1_purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_purec(double); +IMPORT CONST double Sleef_sqrtd1_u05purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_u05purec(double); +IMPORT CONST double Sleef_sqrtd1_u35purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_u35purec(double); +IMPORT CONST double Sleef_hypotd1_u05purec(double, double); +IMPORT CONST double Sleef_cinz_hypotd1_u05purec(double, double); +IMPORT CONST double Sleef_hypotd1_u35purec(double, double); +IMPORT CONST double Sleef_cinz_hypotd1_u35purec(double, double); +IMPORT CONST double Sleef_fabsd1_purec(double); +IMPORT CONST double Sleef_cinz_fabsd1_purec(double); +IMPORT CONST double Sleef_copysignd1_purec(double, double); +IMPORT CONST double Sleef_cinz_copysignd1_purec(double, double); +IMPORT CONST double Sleef_fmaxd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmaxd1_purec(double, double); +IMPORT CONST double Sleef_fmind1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmind1_purec(double, double); +IMPORT CONST double Sleef_fdimd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fdimd1_purec(double, double); +IMPORT CONST double Sleef_truncd1_purec(double); +IMPORT CONST double Sleef_cinz_truncd1_purec(double); +IMPORT CONST double Sleef_floord1_purec(double); +IMPORT CONST double Sleef_cinz_floord1_purec(double); +IMPORT CONST double Sleef_ceild1_purec(double); +IMPORT CONST double Sleef_cinz_ceild1_purec(double); +IMPORT CONST double Sleef_roundd1_purec(double); +IMPORT CONST double 
Sleef_cinz_roundd1_purec(double); +IMPORT CONST double Sleef_rintd1_purec(double); +IMPORT CONST double Sleef_cinz_rintd1_purec(double); +IMPORT CONST double Sleef_nextafterd1_purec(double, double); +IMPORT CONST double Sleef_cinz_nextafterd1_purec(double, double); +IMPORT CONST double Sleef_frfrexpd1_purec(double); +IMPORT CONST double Sleef_cinz_frfrexpd1_purec(double); +IMPORT CONST int32_t Sleef_expfrexpd1_purec(double); +IMPORT CONST int32_t Sleef_cinz_expfrexpd1_purec(double); +IMPORT CONST double Sleef_fmodd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmodd1_purec(double, double); +IMPORT CONST double Sleef_remainderd1_purec(double, double); +IMPORT CONST double Sleef_cinz_remainderd1_purec(double, double); +IMPORT CONST Sleef_double_2 Sleef_modfd1_purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_modfd1_purec(double); +IMPORT CONST double Sleef_lgammad1_u10purec(double); +IMPORT CONST double Sleef_cinz_lgammad1_u10purec(double); +IMPORT CONST double Sleef_tgammad1_u10purec(double); +IMPORT CONST double Sleef_cinz_tgammad1_u10purec(double); +IMPORT CONST double Sleef_erfd1_u10purec(double); +IMPORT CONST double Sleef_cinz_erfd1_u10purec(double); +IMPORT CONST double Sleef_erfcd1_u15purec(double); +IMPORT CONST double Sleef_cinz_erfcd1_u15purec(double); +IMPORT CONST int Sleef_getIntd1_purec(int); +IMPORT CONST void *Sleef_getPtrd1_purec(int); + +#ifndef Sleef_float_2_DEFINED +typedef struct { + float x, y; +} Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +IMPORT CONST float Sleef_sinf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sinf1_u35purec(float); +IMPORT CONST float Sleef_cosf1_u35purec(float); +IMPORT CONST float Sleef_cinz_cosf1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u35purec(float); +IMPORT CONST float Sleef_tanf1_u35purec(float); +IMPORT CONST float Sleef_cinz_tanf1_u35purec(float); +IMPORT CONST float Sleef_asinf1_u35purec(float); +IMPORT CONST float Sleef_cinz_asinf1_u35purec(float); +IMPORT CONST float Sleef_acosf1_u35purec(float); +IMPORT CONST float Sleef_cinz_acosf1_u35purec(float); +IMPORT CONST float Sleef_atanf1_u35purec(float); +IMPORT CONST float Sleef_cinz_atanf1_u35purec(float); +IMPORT CONST float Sleef_atan2f1_u35purec(float, float); +IMPORT CONST float Sleef_cinz_atan2f1_u35purec(float, float); +IMPORT CONST float Sleef_logf1_u35purec(float); +IMPORT CONST float Sleef_cinz_logf1_u35purec(float); +IMPORT CONST float Sleef_cbrtf1_u35purec(float); +IMPORT CONST float Sleef_cinz_cbrtf1_u35purec(float); +IMPORT CONST float Sleef_sinf1_u10purec(float); +IMPORT CONST float Sleef_cinz_sinf1_u10purec(float); +IMPORT CONST float Sleef_cosf1_u10purec(float); +IMPORT CONST float Sleef_cinz_cosf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u10purec(float); +IMPORT CONST float Sleef_tanf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tanf1_u10purec(float); +IMPORT CONST float Sleef_asinf1_u10purec(float); +IMPORT CONST float Sleef_cinz_asinf1_u10purec(float); +IMPORT CONST float Sleef_acosf1_u10purec(float); +IMPORT CONST float Sleef_cinz_acosf1_u10purec(float); +IMPORT CONST float Sleef_atanf1_u10purec(float); +IMPORT CONST float Sleef_cinz_atanf1_u10purec(float); +IMPORT CONST float Sleef_atan2f1_u10purec(float, float); +IMPORT CONST float Sleef_cinz_atan2f1_u10purec(float, float); +IMPORT CONST float Sleef_logf1_u10purec(float); +IMPORT CONST float 
Sleef_cinz_logf1_u10purec(float); +IMPORT CONST float Sleef_cbrtf1_u10purec(float); +IMPORT CONST float Sleef_cinz_cbrtf1_u10purec(float); +IMPORT CONST float Sleef_expf1_u10purec(float); +IMPORT CONST float Sleef_cinz_expf1_u10purec(float); +IMPORT CONST float Sleef_powf1_u10purec(float, float); +IMPORT CONST float Sleef_cinz_powf1_u10purec(float, float); +IMPORT CONST float Sleef_sinhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_sinhf1_u10purec(float); +IMPORT CONST float Sleef_coshf1_u10purec(float); +IMPORT CONST float Sleef_cinz_coshf1_u10purec(float); +IMPORT CONST float Sleef_tanhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tanhf1_u10purec(float); +IMPORT CONST float Sleef_sinhf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sinhf1_u35purec(float); +IMPORT CONST float Sleef_coshf1_u35purec(float); +IMPORT CONST float Sleef_cinz_coshf1_u35purec(float); +IMPORT CONST float Sleef_tanhf1_u35purec(float); +IMPORT CONST float Sleef_cinz_tanhf1_u35purec(float); +IMPORT CONST float Sleef_fastsinf1_u3500purec(float); +IMPORT CONST float Sleef_cinz_fastsinf1_u3500purec(float); +IMPORT CONST float Sleef_fastcosf1_u3500purec(float); +IMPORT CONST float Sleef_cinz_fastcosf1_u3500purec(float); +IMPORT CONST float Sleef_fastpowf1_u3500purec(float, float); +IMPORT CONST float Sleef_cinz_fastpowf1_u3500purec(float, float); +IMPORT CONST float Sleef_asinhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_asinhf1_u10purec(float); +IMPORT CONST float Sleef_acoshf1_u10purec(float); +IMPORT CONST float Sleef_cinz_acoshf1_u10purec(float); +IMPORT CONST float Sleef_atanhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_atanhf1_u10purec(float); +IMPORT CONST float Sleef_exp2f1_u10purec(float); +IMPORT CONST float Sleef_cinz_exp2f1_u10purec(float); +IMPORT CONST float Sleef_exp2f1_u35purec(float); +IMPORT CONST float Sleef_cinz_exp2f1_u35purec(float); +IMPORT CONST float Sleef_exp10f1_u10purec(float); +IMPORT CONST float Sleef_cinz_exp10f1_u10purec(float); +IMPORT CONST float Sleef_exp10f1_u35purec(float); +IMPORT CONST float Sleef_cinz_exp10f1_u35purec(float); +IMPORT CONST float Sleef_expm1f1_u10purec(float); +IMPORT CONST float Sleef_cinz_expm1f1_u10purec(float); +IMPORT CONST float Sleef_log10f1_u10purec(float); +IMPORT CONST float Sleef_cinz_log10f1_u10purec(float); +IMPORT CONST float Sleef_log2f1_u10purec(float); +IMPORT CONST float Sleef_cinz_log2f1_u10purec(float); +IMPORT CONST float Sleef_log2f1_u35purec(float); +IMPORT CONST float Sleef_cinz_log2f1_u35purec(float); +IMPORT CONST float Sleef_log1pf1_u10purec(float); +IMPORT CONST float Sleef_cinz_log1pf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u05purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u35purec(float); +IMPORT CONST float Sleef_sinpif1_u05purec(float); +IMPORT CONST float Sleef_cinz_sinpif1_u05purec(float); +IMPORT CONST float Sleef_cospif1_u05purec(float); +IMPORT CONST float Sleef_cinz_cospif1_u05purec(float); +IMPORT CONST float Sleef_fmaf1_purec(float, float, float); +IMPORT CONST float Sleef_cinz_fmaf1_purec(float, float, float); +IMPORT CONST float Sleef_sqrtf1_purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_purec(float); +IMPORT CONST float Sleef_sqrtf1_u05purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_u05purec(float); +IMPORT CONST float Sleef_sqrtf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_u35purec(float); +IMPORT CONST float 
Sleef_hypotf1_u05purec(float, float); +IMPORT CONST float Sleef_cinz_hypotf1_u05purec(float, float); +IMPORT CONST float Sleef_hypotf1_u35purec(float, float); +IMPORT CONST float Sleef_cinz_hypotf1_u35purec(float, float); +IMPORT CONST float Sleef_fabsf1_purec(float); +IMPORT CONST float Sleef_cinz_fabsf1_purec(float); +IMPORT CONST float Sleef_copysignf1_purec(float, float); +IMPORT CONST float Sleef_cinz_copysignf1_purec(float, float); +IMPORT CONST float Sleef_fmaxf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fmaxf1_purec(float, float); +IMPORT CONST float Sleef_fminf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fminf1_purec(float, float); +IMPORT CONST float Sleef_fdimf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fdimf1_purec(float, float); +IMPORT CONST float Sleef_truncf1_purec(float); +IMPORT CONST float Sleef_cinz_truncf1_purec(float); +IMPORT CONST float Sleef_floorf1_purec(float); +IMPORT CONST float Sleef_cinz_floorf1_purec(float); +IMPORT CONST float Sleef_ceilf1_purec(float); +IMPORT CONST float Sleef_cinz_ceilf1_purec(float); +IMPORT CONST float Sleef_roundf1_purec(float); +IMPORT CONST float Sleef_cinz_roundf1_purec(float); +IMPORT CONST float Sleef_rintf1_purec(float); +IMPORT CONST float Sleef_cinz_rintf1_purec(float); +IMPORT CONST float Sleef_nextafterf1_purec(float, float); +IMPORT CONST float Sleef_cinz_nextafterf1_purec(float, float); +IMPORT CONST float Sleef_frfrexpf1_purec(float); +IMPORT CONST float Sleef_cinz_frfrexpf1_purec(float); +IMPORT CONST float Sleef_fmodf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fmodf1_purec(float, float); +IMPORT CONST float Sleef_remainderf1_purec(float, float); +IMPORT CONST float Sleef_cinz_remainderf1_purec(float, float); +IMPORT CONST Sleef_float_2 Sleef_modff1_purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_modff1_purec(float); +IMPORT CONST float Sleef_lgammaf1_u10purec(float); +IMPORT CONST float Sleef_cinz_lgammaf1_u10purec(float); +IMPORT CONST float Sleef_tgammaf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tgammaf1_u10purec(float); +IMPORT CONST float Sleef_erff1_u10purec(float); +IMPORT CONST float Sleef_cinz_erff1_u10purec(float); +IMPORT CONST float Sleef_erfcf1_u15purec(float); +IMPORT CONST float Sleef_cinz_erfcf1_u15purec(float); +IMPORT CONST int Sleef_getIntf1_purec(int); +IMPORT CONST int Sleef_cinz_getIntf1_purec(int); +IMPORT CONST void *Sleef_getPtrf1_purec(int); +IMPORT CONST void *Sleef_cinz_getPtrf1_purec(int); +#endif +#ifdef FP_FAST_FMA + +#ifndef Sleef_double_2_DEFINED +typedef struct { + double x, y; +} Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +IMPORT CONST double Sleef_sind1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sind1_u35purecfma(double); +IMPORT CONST double Sleef_cosd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_cosd1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u35purecfma(double); +IMPORT CONST double Sleef_tand1_u35purecfma(double); +IMPORT CONST double Sleef_finz_tand1_u35purecfma(double); +IMPORT CONST double Sleef_asind1_u35purecfma(double); +IMPORT CONST double Sleef_finz_asind1_u35purecfma(double); +IMPORT CONST double Sleef_acosd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_acosd1_u35purecfma(double); +IMPORT CONST double Sleef_atand1_u35purecfma(double); +IMPORT CONST double Sleef_finz_atand1_u35purecfma(double); +IMPORT CONST double Sleef_atan2d1_u35purecfma(double, double); +IMPORT CONST double 
Sleef_finz_atan2d1_u35purecfma(double, double); +IMPORT CONST double Sleef_logd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_logd1_u35purecfma(double); +IMPORT CONST double Sleef_cbrtd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_cbrtd1_u35purecfma(double); +IMPORT CONST double Sleef_sind1_u10purecfma(double); +IMPORT CONST double Sleef_finz_sind1_u10purecfma(double); +IMPORT CONST double Sleef_cosd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_cosd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u10purecfma(double); +IMPORT CONST double Sleef_tand1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tand1_u10purecfma(double); +IMPORT CONST double Sleef_asind1_u10purecfma(double); +IMPORT CONST double Sleef_finz_asind1_u10purecfma(double); +IMPORT CONST double Sleef_acosd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_acosd1_u10purecfma(double); +IMPORT CONST double Sleef_atand1_u10purecfma(double); +IMPORT CONST double Sleef_finz_atand1_u10purecfma(double); +IMPORT CONST double Sleef_atan2d1_u10purecfma(double, double); +IMPORT CONST double Sleef_finz_atan2d1_u10purecfma(double, double); +IMPORT CONST double Sleef_logd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_logd1_u10purecfma(double); +IMPORT CONST double Sleef_cbrtd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_cbrtd1_u10purecfma(double); +IMPORT CONST double Sleef_expd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_expd1_u10purecfma(double); +IMPORT CONST double Sleef_powd1_u10purecfma(double, double); +IMPORT CONST double Sleef_finz_powd1_u10purecfma(double, double); +IMPORT CONST double Sleef_sinhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_sinhd1_u10purecfma(double); +IMPORT CONST double Sleef_coshd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_coshd1_u10purecfma(double); +IMPORT CONST double Sleef_tanhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tanhd1_u10purecfma(double); +IMPORT CONST double Sleef_sinhd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sinhd1_u35purecfma(double); +IMPORT CONST double Sleef_coshd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_coshd1_u35purecfma(double); +IMPORT CONST double Sleef_tanhd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_tanhd1_u35purecfma(double); +IMPORT CONST double Sleef_fastsind1_u3500purecfma(double); +IMPORT CONST double Sleef_finz_fastsind1_u3500purecfma(double); +IMPORT CONST double Sleef_fastcosd1_u3500purecfma(double); +IMPORT CONST double Sleef_finz_fastcosd1_u3500purecfma(double); +IMPORT CONST double Sleef_fastpowd1_u3500purecfma(double, double); +IMPORT CONST double Sleef_finz_fastpowd1_u3500purecfma(double, double); +IMPORT CONST double Sleef_asinhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_asinhd1_u10purecfma(double); +IMPORT CONST double Sleef_acoshd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_acoshd1_u10purecfma(double); +IMPORT CONST double Sleef_atanhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_atanhd1_u10purecfma(double); +IMPORT CONST double Sleef_exp2d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_exp2d1_u10purecfma(double); +IMPORT CONST double Sleef_exp2d1_u35purecfma(double); +IMPORT CONST double Sleef_finz_exp2d1_u35purecfma(double); +IMPORT CONST double Sleef_exp10d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_exp10d1_u10purecfma(double); +IMPORT CONST double Sleef_exp10d1_u35purecfma(double); +IMPORT CONST double 
Sleef_finz_exp10d1_u35purecfma(double); +IMPORT CONST double Sleef_expm1d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_expm1d1_u10purecfma(double); +IMPORT CONST double Sleef_log10d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log10d1_u10purecfma(double); +IMPORT CONST double Sleef_log2d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log2d1_u10purecfma(double); +IMPORT CONST double Sleef_log2d1_u35purecfma(double); +IMPORT CONST double Sleef_finz_log2d1_u35purecfma(double); +IMPORT CONST double Sleef_log1pd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log1pd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u05purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u35purecfma(double); +IMPORT CONST double Sleef_sinpid1_u05purecfma(double); +IMPORT CONST double Sleef_finz_sinpid1_u05purecfma(double); +IMPORT CONST double Sleef_cospid1_u05purecfma(double); +IMPORT CONST double Sleef_finz_cospid1_u05purecfma(double); +IMPORT CONST double Sleef_ldexpd1_purecfma(double, int32_t); +IMPORT CONST double Sleef_finz_ldexpd1_purecfma(double, int32_t); +IMPORT CONST int32_t Sleef_ilogbd1_purecfma(double); +IMPORT CONST int32_t Sleef_finz_ilogbd1_purecfma(double); +IMPORT CONST double Sleef_fmad1_purecfma(double, double, double); +IMPORT CONST double Sleef_finz_fmad1_purecfma(double, double, double); +IMPORT CONST double Sleef_sqrtd1_purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_purecfma(double); +IMPORT CONST double Sleef_sqrtd1_u05purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_u05purecfma(double); +IMPORT CONST double Sleef_sqrtd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_u35purecfma(double); +IMPORT CONST double Sleef_hypotd1_u05purecfma(double, double); +IMPORT CONST double Sleef_finz_hypotd1_u05purecfma(double, double); +IMPORT CONST double Sleef_hypotd1_u35purecfma(double, double); +IMPORT CONST double Sleef_finz_hypotd1_u35purecfma(double, double); +IMPORT CONST double Sleef_fabsd1_purecfma(double); +IMPORT CONST double Sleef_finz_fabsd1_purecfma(double); +IMPORT CONST double Sleef_copysignd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_copysignd1_purecfma(double, double); +IMPORT CONST double Sleef_fmaxd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmaxd1_purecfma(double, double); +IMPORT CONST double Sleef_fmind1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmind1_purecfma(double, double); +IMPORT CONST double Sleef_fdimd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fdimd1_purecfma(double, double); +IMPORT CONST double Sleef_truncd1_purecfma(double); +IMPORT CONST double Sleef_finz_truncd1_purecfma(double); +IMPORT CONST double Sleef_floord1_purecfma(double); +IMPORT CONST double Sleef_finz_floord1_purecfma(double); +IMPORT CONST double Sleef_ceild1_purecfma(double); +IMPORT CONST double Sleef_finz_ceild1_purecfma(double); +IMPORT CONST double Sleef_roundd1_purecfma(double); +IMPORT CONST double Sleef_finz_roundd1_purecfma(double); +IMPORT CONST double Sleef_rintd1_purecfma(double); +IMPORT CONST double Sleef_finz_rintd1_purecfma(double); +IMPORT CONST double Sleef_nextafterd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_nextafterd1_purecfma(double, double); +IMPORT CONST double Sleef_frfrexpd1_purecfma(double); +IMPORT CONST double Sleef_finz_frfrexpd1_purecfma(double); +IMPORT CONST 
int32_t Sleef_expfrexpd1_purecfma(double); +IMPORT CONST int32_t Sleef_finz_expfrexpd1_purecfma(double); +IMPORT CONST double Sleef_fmodd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmodd1_purecfma(double, double); +IMPORT CONST double Sleef_remainderd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_remainderd1_purecfma(double, double); +IMPORT CONST Sleef_double_2 Sleef_modfd1_purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_modfd1_purecfma(double); +IMPORT CONST double Sleef_lgammad1_u10purecfma(double); +IMPORT CONST double Sleef_finz_lgammad1_u10purecfma(double); +IMPORT CONST double Sleef_tgammad1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tgammad1_u10purecfma(double); +IMPORT CONST double Sleef_erfd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_erfd1_u10purecfma(double); +IMPORT CONST double Sleef_erfcd1_u15purecfma(double); +IMPORT CONST double Sleef_finz_erfcd1_u15purecfma(double); +IMPORT CONST int Sleef_getIntd1_purecfma(int); +IMPORT CONST void *Sleef_getPtrd1_purecfma(int); + +#ifndef Sleef_float_2_DEFINED +typedef struct { + float x, y; +} Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +IMPORT CONST float Sleef_sinf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sinf1_u35purecfma(float); +IMPORT CONST float Sleef_cosf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_cosf1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u35purecfma(float); +IMPORT CONST float Sleef_tanf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_tanf1_u35purecfma(float); +IMPORT CONST float Sleef_asinf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_asinf1_u35purecfma(float); +IMPORT CONST float Sleef_acosf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_acosf1_u35purecfma(float); +IMPORT CONST float Sleef_atanf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_atanf1_u35purecfma(float); +IMPORT CONST float Sleef_atan2f1_u35purecfma(float, float); +IMPORT CONST float Sleef_finz_atan2f1_u35purecfma(float, float); +IMPORT CONST float Sleef_logf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_logf1_u35purecfma(float); +IMPORT CONST float Sleef_cbrtf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_cbrtf1_u35purecfma(float); +IMPORT CONST float Sleef_sinf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_sinf1_u10purecfma(float); +IMPORT CONST float Sleef_cosf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_cosf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u10purecfma(float); +IMPORT CONST float Sleef_tanf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tanf1_u10purecfma(float); +IMPORT CONST float Sleef_asinf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_asinf1_u10purecfma(float); +IMPORT CONST float Sleef_acosf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_acosf1_u10purecfma(float); +IMPORT CONST float Sleef_atanf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_atanf1_u10purecfma(float); +IMPORT CONST float Sleef_atan2f1_u10purecfma(float, float); +IMPORT CONST float Sleef_finz_atan2f1_u10purecfma(float, float); +IMPORT CONST float Sleef_logf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_logf1_u10purecfma(float); +IMPORT CONST float Sleef_cbrtf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_cbrtf1_u10purecfma(float); +IMPORT CONST float Sleef_expf1_u10purecfma(float); +IMPORT CONST float 
Sleef_finz_expf1_u10purecfma(float); +IMPORT CONST float Sleef_powf1_u10purecfma(float, float); +IMPORT CONST float Sleef_finz_powf1_u10purecfma(float, float); +IMPORT CONST float Sleef_sinhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_sinhf1_u10purecfma(float); +IMPORT CONST float Sleef_coshf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_coshf1_u10purecfma(float); +IMPORT CONST float Sleef_tanhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tanhf1_u10purecfma(float); +IMPORT CONST float Sleef_sinhf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sinhf1_u35purecfma(float); +IMPORT CONST float Sleef_coshf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_coshf1_u35purecfma(float); +IMPORT CONST float Sleef_tanhf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_tanhf1_u35purecfma(float); +IMPORT CONST float Sleef_fastsinf1_u3500purecfma(float); +IMPORT CONST float Sleef_finz_fastsinf1_u3500purecfma(float); +IMPORT CONST float Sleef_fastcosf1_u3500purecfma(float); +IMPORT CONST float Sleef_finz_fastcosf1_u3500purecfma(float); +IMPORT CONST float Sleef_fastpowf1_u3500purecfma(float, float); +IMPORT CONST float Sleef_finz_fastpowf1_u3500purecfma(float, float); +IMPORT CONST float Sleef_asinhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_asinhf1_u10purecfma(float); +IMPORT CONST float Sleef_acoshf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_acoshf1_u10purecfma(float); +IMPORT CONST float Sleef_atanhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_atanhf1_u10purecfma(float); +IMPORT CONST float Sleef_exp2f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_exp2f1_u10purecfma(float); +IMPORT CONST float Sleef_exp2f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_exp2f1_u35purecfma(float); +IMPORT CONST float Sleef_exp10f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_exp10f1_u10purecfma(float); +IMPORT CONST float Sleef_exp10f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_exp10f1_u35purecfma(float); +IMPORT CONST float Sleef_expm1f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_expm1f1_u10purecfma(float); +IMPORT CONST float Sleef_log10f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log10f1_u10purecfma(float); +IMPORT CONST float Sleef_log2f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log2f1_u10purecfma(float); +IMPORT CONST float Sleef_log2f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_log2f1_u35purecfma(float); +IMPORT CONST float Sleef_log1pf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log1pf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u05purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u35purecfma(float); +IMPORT CONST float Sleef_sinpif1_u05purecfma(float); +IMPORT CONST float Sleef_finz_sinpif1_u05purecfma(float); +IMPORT CONST float Sleef_cospif1_u05purecfma(float); +IMPORT CONST float Sleef_finz_cospif1_u05purecfma(float); +IMPORT CONST float Sleef_fmaf1_purecfma(float, float, float); +IMPORT CONST float Sleef_finz_fmaf1_purecfma(float, float, float); +IMPORT CONST float Sleef_sqrtf1_purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_purecfma(float); +IMPORT CONST float Sleef_sqrtf1_u05purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_u05purecfma(float); +IMPORT CONST float Sleef_sqrtf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_u35purecfma(float); +IMPORT CONST float 
Sleef_hypotf1_u05purecfma(float, float); +IMPORT CONST float Sleef_finz_hypotf1_u05purecfma(float, float); +IMPORT CONST float Sleef_hypotf1_u35purecfma(float, float); +IMPORT CONST float Sleef_finz_hypotf1_u35purecfma(float, float); +IMPORT CONST float Sleef_fabsf1_purecfma(float); +IMPORT CONST float Sleef_finz_fabsf1_purecfma(float); +IMPORT CONST float Sleef_copysignf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_copysignf1_purecfma(float, float); +IMPORT CONST float Sleef_fmaxf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fmaxf1_purecfma(float, float); +IMPORT CONST float Sleef_fminf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fminf1_purecfma(float, float); +IMPORT CONST float Sleef_fdimf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fdimf1_purecfma(float, float); +IMPORT CONST float Sleef_truncf1_purecfma(float); +IMPORT CONST float Sleef_finz_truncf1_purecfma(float); +IMPORT CONST float Sleef_floorf1_purecfma(float); +IMPORT CONST float Sleef_finz_floorf1_purecfma(float); +IMPORT CONST float Sleef_ceilf1_purecfma(float); +IMPORT CONST float Sleef_finz_ceilf1_purecfma(float); +IMPORT CONST float Sleef_roundf1_purecfma(float); +IMPORT CONST float Sleef_finz_roundf1_purecfma(float); +IMPORT CONST float Sleef_rintf1_purecfma(float); +IMPORT CONST float Sleef_finz_rintf1_purecfma(float); +IMPORT CONST float Sleef_nextafterf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_nextafterf1_purecfma(float, float); +IMPORT CONST float Sleef_frfrexpf1_purecfma(float); +IMPORT CONST float Sleef_finz_frfrexpf1_purecfma(float); +IMPORT CONST float Sleef_fmodf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fmodf1_purecfma(float, float); +IMPORT CONST float Sleef_remainderf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_remainderf1_purecfma(float, float); +IMPORT CONST Sleef_float_2 Sleef_modff1_purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_modff1_purecfma(float); +IMPORT CONST float Sleef_lgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_lgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_tgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_erff1_u10purecfma(float); +IMPORT CONST float Sleef_finz_erff1_u10purecfma(float); +IMPORT CONST float Sleef_erfcf1_u15purecfma(float); +IMPORT CONST float Sleef_finz_erfcf1_u15purecfma(float); +IMPORT CONST int Sleef_getIntf1_purecfma(int); +IMPORT CONST int Sleef_finz_getIntf1_purecfma(int); +IMPORT CONST void *Sleef_getPtrf1_purecfma(int); +IMPORT CONST void *Sleef_finz_getPtrf1_purecfma(int); +#endif +#ifdef __cplusplus +} +#endif + +#undef IMPORT +#endif // #ifndef __SLEEF_H__ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/xnnpack.h b/env-llmeval/lib/python3.10/site-packages/torch/include/xnnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..705cb6a39fb1c566534b064e71ab98f5a9488b98 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/xnnpack.h @@ -0,0 +1,4535 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// Copyright 2019 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/// The number of bytes XNNPACK may read beyond array bounds. 
+/// The caller must allocate at least this many extra bytes after the tensor data passed to XNNPACK. +/// +/// Note: XNNPACK reads, but never writes beyond array bounds. +#define XNN_EXTRA_BYTES 16 + +/// Maximum number of dimensions in tensor shape. +#define XNN_MAX_TENSOR_DIMS 6 + +/// Allow sparse inference in a Runtime. +/// +/// Note: this flag hints XNNPACK to consider sparse inference, but does not guarantee it. +#define XNN_FLAG_SPARSE_INFERENCE 0x00000001 +#define XNN_FLAG_HINT_SPARSE_INFERENCE XNN_FLAG_SPARSE_INFERENCE + +/// Allow IEEE FP16 inference in a Runtime. +/// +/// Note: this flag hints XNNPACK to consider IEEE FP16 inference, but does not guarantee it. +#define XNN_FLAG_FP16_INFERENCE 0x00000002 +#define XNN_FLAG_HINT_FP16_INFERENCE XNN_FLAG_FP16_INFERENCE + +/// Force IEEE FP16 inference in a Runtime, and fail if FP16 inference is not possible. +/// +/// Note: this flag guarantees that XNNPACK will use IEEE FP16 inference, or fail to create the Runtime object. +/// Warning: on x86 systems FP16 computations will be emulated at a substantial performance cost. +#define XNN_FLAG_FORCE_FP16_INFERENCE 0x00000004 + +/// Enable timing of each operator's runtime. +#define XNN_FLAG_BASIC_PROFILING 0x00000008 + +/// The convolution operator represents a depthwise convolution, and use HWGo layout for filters. +#define XNN_FLAG_DEPTHWISE_CONVOLUTION 0x00000001 + +/// Assume transposed weights in a fully connected operator. +#define XNN_FLAG_TRANSPOSE_WEIGHTS 0x00000001 + +/// The operator assumes NHWC layout for the input, regardless of the output layout. +#define XNN_FLAG_INPUT_NHWC 0x00000002 + +/// Match "SAME" padding in TensorFlow. Exact padding values are computed dynamically depending on input size. +#define XNN_FLAG_TENSORFLOW_SAME_PADDING 0x00000004 + +/// Implicitly flatten and reshape input of a Fully Connected operator into a 2D tensor. +#define XNN_FLAG_TENSORFLOW_RESHAPE_2D 0x00000004 + +/// Match behaviour of TensorFlow 1.x. +#define XNN_FLAG_TENSORFLOW_LEGACY_MODE 0x00000004 + +/// Static weights of the FP16 operator are in FP32 format. +#define XNN_FLAG_FP32_STATIC_WEIGHTS 0x00000008 + +/// Align corners of input and output images in resize operations. +#define XNN_FLAG_ALIGN_CORNERS 0x00000008 + +/// Yield worker threads of the thread pool to the system scheduler after the inference. +#define XNN_FLAG_YIELD_WORKERS 0x00000010 + +/// Status code for any XNNPACK function call. +enum xnn_status { + /// The call succeeded, and all output arguments now contain valid data. + xnn_status_success = 0, + xnn_status_uninitialized = 1, + xnn_status_invalid_parameter = 2, + xnn_status_invalid_state = 3, + xnn_status_unsupported_parameter = 4, + xnn_status_unsupported_hardware = 5, + xnn_status_out_of_memory = 6, +}; + +struct xnn_allocator { + /// User-specified pointer that will be passed as-is to all functions in this structure. + void* context; + /// Pointer to a function to be called for general memory allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param size - The size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the allocated memory block of at least @ref size bytes. + /// If allocation fails, the function must return NULL. + void* (*allocate)(void* context, size_t size); + /// Pointer to a function to be called for general memory re-allocation, i.e. to increase or shrink a previously + /// allocated memory block. 
The content of the old memory block is copied to the new memory block. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL. + /// If the pointer is NULL, the @ref reallocate call is equivalent to an @ref allocate call. + /// @param size - The new size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the newly allocated memory block of at least @ref size bytes with the content of the previous + /// memory block. + /// If allocation fails, the function must return NULL, but must not release the previous memory block. + void* (*reallocate)(void* context, void* pointer, size_t size); + /// Pointer to a function to be called for general memory de-allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL. + /// If the pointer is NULL, the @ref deallocate call is a no-op. + void (*deallocate)(void* context, void* pointer); + /// Pointer to a function to be called for aligned memory allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param alignment - The alignment of the memory block to allocate, in bytes. Alignment is always a power-of-2. + /// @param size - The size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the allocated memory block of at least @ref size bytes. + /// If allocation fails, the function must return NULL. + void* (*aligned_allocate)(void* context, size_t alignment, size_t size); + /// Pointer to a function to be called for aligned memory de-allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref aligned_allocate function. Can be NULL. + /// If the pointer is NULL, the @ref aligned_deallocate call is a no-op. + void (*aligned_deallocate)(void* context, void* pointer); +}; + +/// Initialize XNNPACK library. +/// +/// XNNPACK must be successfully initialized before use. During initialization, XNNPACK populates internal structures +/// depending on the host processor. Initialization can be time-consuming. +/// +/// @param[in] allocator - structure with function pointers to be use for memory allocation and de-allocation. +/// If this argument is NULL, system-provided memory management functions (e.g. malloc/free) +/// will be used. +/// +/// @retval xnn_status_success - XNNPACK is successfully initialized and ready to use. +/// @retval xnn_status_out_of_memory - initialization failed due to out-of-memory condition. +/// @retval xnn_status_unsupported_hardware - initialization failed because the host processor does not satisfy the +/// minimum hardware requirements for XNNPACK. E.g. this may happen on x86 +/// processors without SSE2 extension, or on 32-bit ARM processors without +/// the NEON SIMD extension. +enum xnn_status xnn_initialize(const struct xnn_allocator* allocator); + +/// Deinitialize XNNPACK library. +/// +/// To avoid memory and resource leaks, users must call xnn_deinitialize once for each successful xnn_initialize call. +/// +/// @retval xnn_status_success - deinitialization call succeeded. +enum xnn_status xnn_deinitialize(void); + +/// Subgraph is an abstract representation of a neural network model. 
+/// Subgraph objects are used to define Values (tensors) and Nodes (operators) comprising the model. +typedef struct xnn_subgraph* xnn_subgraph_t; + +/// Create a empty Subgraph object. +/// +/// @param external_value_ids - number of Value IDs to reserve for communication with external graph representation. +/// The Subgraph object would avoid creating internal Value IDs in the +/// [0, reserved_value_ids-1] range. +/// @param flags - binary features of the subgraph. No supported flags are currently defined. +/// @param subgraph_out - pointer to the variable that will be initialized with a handle to the Subgraph object upon +/// successful return. +enum xnn_status xnn_create_subgraph( + uint32_t external_value_ids, + uint32_t flags, + xnn_subgraph_t* subgraph_out); + +/// Destroy a Subgraph object, as well as Values, and Nodes associated with the subgraph. +/// +/// @param subgraph - the Subgraph object to destroy. +enum xnn_status xnn_delete_subgraph( + xnn_subgraph_t subgraph); + +#define XNN_VALUE_FLAG_EXTERNAL_INPUT 0x00000001 +#define XNN_VALUE_FLAG_EXTERNAL_OUTPUT 0x00000002 +#define XNN_VALUE_FLAG_PERSISTENT 0x00000004 + +#define XNN_INVALID_VALUE_ID UINT32_MAX + +/// Type of elements in a Value object. +enum xnn_datatype { + /// Invalid data type. Valid Values never have this datatype. + xnn_datatype_invalid = 0, + /// IEEE754 single-precision floating-point. + xnn_datatype_fp32 = 1, + /// IEEE754 half-precision floating-point. + xnn_datatype_fp16 = 2, + /// Quantized 8-bit signed integer with shared per-Value quantization parameters. + xnn_datatype_qint8 = 3, + /// Quantized 8-bit unsigned integer with shared per-Value quantization parameters. + xnn_datatype_quint8 = 4, + /// Quantized 32-bit signed integer with shared per-Value quantization parameters. + xnn_datatype_qint32 = 5, + /// Quantized 8-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint8 = 6, + /// Quantized 32-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint32 = 7, +}; + +/// Define a tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be is NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. 
+enum xnn_status xnn_define_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + size_t num_dims, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - multiplication factor to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + int32_t zero_point, + float scale, + size_t num_dims, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a channelwise quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param scale - per-channel multiplication factors to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters. +/// Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution, +/// Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in +/// the Depthwise Convolution operators. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on +/// the Subgraph creation.
If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_channelwise_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + const float* scale, + size_t num_dims, + size_t channel_dim, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a Convert Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Convert Node. No supported flags are currently defined. +enum xnn_status xnn_define_convert( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Convolution Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride). +/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param groups - number of convolution groups. +/// @param group_input_channels - number of input channels per group. +/// @param group_output_channels - number of output channels per group. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, groups * group_input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. The filter tensor must ge a 4D tensor defined in the @a subgraph +/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels] +/// dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. 
If +/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with [groups * +/// group_output_channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, groups * group_output_channels] dimensions. +/// @param flags - binary features of the 2D Convolution Node. The only currently supported value is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_convolution_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Deconvolution (Transposed Convolution) Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param padding_top - implicit padding above 2D output data. +/// @param padding_right - implicit padding to the right of 2D output data. +/// @param padding_bottom - implicit padding below 2D output data. +/// @param padding_left - implicit padding to the left of 2D output data. +/// @param adjustment_height - additional elements in the bottom of the 2D output data. +/// @param adjustment_width - additional elements to the right of the 2D output data. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param upsampling_height - height of upsampling region for deconvolution input (deconvolution height stride). +/// @param upsampling_width - width of upsampling region for deconvolution input (deconvolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param groups - number of convolution groups. +/// @param group_input_channels - number of input channels per group. +/// @param group_output_channels - number of output channels per group. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, groups * group_input_channels] dimensions. +/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 4D tensor defined in the @a subgraph +/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels] +/// dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Deconvolution Node without a bias. If +/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with +/// [groups * group_output_channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, groups * group_output_channels] dimensions. +/// @param flags - binary features of the 2D Deconvolution Node. No supported flags are currently defined.
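+///
+/// Illustrative call (editorial sketch; input_id, filter_id, bias_id and output_id are assumed to
+/// be Value IDs defined earlier, e.g. via xnn_define_tensor_value):
+///
+/// @code
+///   // Hypothetical 3x3 deconvolution with 2x upsampling, 16 -> 8 channels, no output clipping.
+///   xnn_define_deconvolution_2d(
+///     subgraph,
+///     /*padding_top=*/1, /*padding_right=*/1, /*padding_bottom=*/1, /*padding_left=*/1,
+///     /*adjustment_height=*/0, /*adjustment_width=*/0,
+///     /*kernel_height=*/3, /*kernel_width=*/3,
+///     /*upsampling_height=*/2, /*upsampling_width=*/2,
+///     /*dilation_height=*/1, /*dilation_width=*/1,
+///     /*groups=*/1, /*group_input_channels=*/16, /*group_output_channels=*/8,
+///     /*output_min=*/-INFINITY, /*output_max=*/+INFINITY,  // INFINITY from <math.h>
+///     input_id, filter_id, bias_id, output_id, /*flags=*/0);
+/// @endcode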
+enum xnn_status xnn_define_deconvolution_2d( + xnn_subgraph_t subgraph, + uint32_t padding_top, + uint32_t padding_right, + uint32_t padding_bottom, + uint32_t padding_left, + uint32_t adjustment_height, + uint32_t adjustment_width, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t upsampling_height, + uint32_t upsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Depthwise Convolution Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride). +/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param depth_multiplier - ratio of output channels to input channels. +/// @param input_channels - number of input channels. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. The filter tensor must ge a 4D tensor defined in the @a subgraph +/// with [1, kernel_height, kernel_width, input_channels * depth_multiplier] dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Depthwise Convolution Node without +/// a bias. If present, the bias tensor must be a 1D tensor defined in the @a subgraph with +/// [input_channels * depth_multiplier] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, input_channels * depth_multiplier] dimensions. +/// @param flags - binary features of the 2D Depthwise Convolution Node. The only currently supported values is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. 
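+///
+/// Illustrative call (editorial sketch; the tensor IDs are assumed to have been defined earlier):
+///
+/// @code
+///   // Hypothetical 3x3 stride-1 depthwise convolution over 32 channels with ReLU6-style clipping.
+///   xnn_define_depthwise_convolution_2d(
+///     subgraph,
+///     /*input_padding_top=*/1, /*input_padding_right=*/1,
+///     /*input_padding_bottom=*/1, /*input_padding_left=*/1,
+///     /*kernel_height=*/3, /*kernel_width=*/3,
+///     /*subsampling_height=*/1, /*subsampling_width=*/1,
+///     /*dilation_height=*/1, /*dilation_width=*/1,
+///     /*depth_multiplier=*/1, /*input_channels=*/32,
+///     /*output_min=*/0.0f, /*output_max=*/6.0f,
+///     input_id, filter_id, bias_id, output_id, /*flags=*/0);
+/// @endcode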
+enum xnn_status xnn_define_depthwise_convolution_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t depth_multiplier, + size_t input_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Depth To Space Node and add it to a Subgraph. +/// +/// The Depth To Space Node rearranges data from depth into blocks of spatial data (a reverse transform to +/// Space To Depth). For a given input pixel, an output square of pixels with side @a block_size is formed from values +/// in the corresponding number of its channels. The output depth is therefore @a block_size x @a block_size times +/// smaller than that of the input. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, OC * block_size * block_size] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH * block_size, IW * block_size, OC] dimensions. +/// @param block_size - the size of the spatial block. +/// @param flags - binary features of the input_channels Node. No supported flags are currently defined. +enum xnn_status xnn_define_depth_to_space( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t block_size, + uint32_t flags); + +/// Define a 1D Global Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions +/// defined in the @a subgraph. Averaging is performed across the second-innermost dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 1D Global Average Pooling Node. No supported flags are currently defined. +enum xnn_status xnn_define_global_average_pooling_1d( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Global Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions +/// defined in the @a subgraph. Averaging is performed across the second- and third-innermost +/// dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 2D Global Average Pooling Node. No supported flags are currently defined. 
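+///
+/// Illustrative call (editorial sketch; input_id and output_id are assumed to be previously
+/// defined Value IDs, e.g. reducing a [N, H, W, C] feature map across its spatial dimensions):
+///
+/// @code
+///   xnn_define_global_average_pooling_2d(
+///     subgraph,
+///     /*output_min=*/-INFINITY, /*output_max=*/+INFINITY,  // no clipping; INFINITY from <math.h>
+///     input_id, output_id, /*flags=*/0);
+/// @endcode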
+enum xnn_status xnn_define_global_average_pooling_2d( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param pooling_height - pooling (kernel) height. +/// @param pooling_width - pooling (kernel) width. +/// @param stride_height - displacing of the pooling window in the vertical dimension of the input pixels corresponding +/// to vertically adjacent output pixels. +/// @param stride_width - displacing of the pooling window in the horizontal dimension of the input pixels corresponding +/// to horizontally adjacent output pixels. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, channels] dimensions +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, channels] dimensions. +/// @param flags - binary features of the 2D Average Pooling Node. The only currently supported values is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_average_pooling_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Fully Connected Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the +/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least +/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if +/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be +/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of +/// [num_input_elements] dimensions, then reshaped into a 2D tensor of +/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the +/// total number of elements in the input tensor. +/// @param filter_id - Value ID for the filter tensor. 
The filter tensor must be a 2D tensor defined in the @a subgraph. +/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have +/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is +/// specified, the filter tensor must have [input_channels, output_channels] dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias. +/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels] +/// dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same +/// dimensionality as the input tensor, all its dimensions but the last one must match the +/// corresponding dimensions of the input tensor, and the last dimension of the output tensor must +/// match the first dimension of the filter tensor. In particular, if input is a 2D tensor, output +/// must be a 2D tensor of [batch_size, output_channels] dimensions. +/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of +/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the +/// total number of elements in the input tensor. +/// @param flags - binary features of the Fully Connected Node. The only currently supported values are +/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS. +enum xnn_status xnn_define_fully_connected( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Max Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param pooling_height - pooling (kernel) height. +/// @param pooling_width - pooling (kernel) width. +/// @param stride_height - displacement of the pooling window in the vertical dimension of the input pixels corresponding +/// to vertically adjacent output pixels. +/// @param stride_width - displacement of the pooling window in the horizontal dimension of the input pixels corresponding +/// to horizontally adjacent output pixels. +/// @param dilation_height - dilation of pooling elements along the height dimension. +/// @param dilation_width - dilation of pooling elements along the width dimension. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, channels] dimensions. +/// @param output_id - Value ID for the output tensor.
The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, channels] dimensions. +/// @param flags - binary features of the 2D Max Pooling Node. The only currently supported value is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_max_pooling_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D ArgMax Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. +/// @param pooling_height - pooling (kernel) height. Vertical stride between pooling regions matches this value. +/// @param pooling_width - pooling (kernel) width. Horizontal stride between pooling regions matches this value. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, channels] dimensions. +/// @param output_value_id - Value ID for the output tensor with the maximum values in the pools. The output tensor must +/// be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels] dimensions. +/// @param output_index_id - Value ID for the output tensor with the indexes of the maximum values in the pools. The +/// output tensor must be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels] +/// dimensions. +/// @param flags - binary features of the 2D ArgMax Pooling Node. No supported flags are currently defined. +enum xnn_status xnn_define_argmax_pooling_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t input_id, + uint32_t output_value_id, + uint32_t output_index_id, + uint32_t flags); + +/// Define a 2D UnPooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param padding_top - implicit padding above 2D output data. +/// @param padding_right - implicit padding to the right of 2D output data. +/// @param padding_bottom - implicit padding below 2D output data. +/// @param padding_left - implicit padding to the left of 2D output data. +/// @param pooling_height - height of the pooling window. +/// @param pooling_width - width of the pooling window. +/// @param input_value_id - Value ID for the input tensor with the max-pooling values to invert. The input value tensor +/// must be a 4D tensor defined in the @a subgraph with [N, IH, IW, channels] dimensions. +/// @param input_index_id - Value ID for the input tensor with the indices of the per-pool maximum values produced by +/// a 2D ArgMax Pooling Node. The input tensor must be a 4D tensor defined in the @a subgraph with +/// [N, IH, IW, channels] dimensions. +/// @param output_id - Value ID for the output tensor.
The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, channels] dimensions. +/// @param flags - binary features of the 2D UnPooling Node. No supported flags are currently defined. +enum xnn_status xnn_define_unpooling_2d( + xnn_subgraph_t subgraph, + uint32_t padding_top, + uint32_t padding_right, + uint32_t padding_bottom, + uint32_t padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t input_value_id, + uint32_t input_index_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Add Node and add it to a Subgraph. +/// +/// The 2-Input Add Node computes elementwise addition of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Add Node. No supported flags are currently defined. +enum xnn_status xnn_define_add2( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Multiply Node and add it to a Subgraph. +/// +/// The 2-Input Multiply Node computes elementwise multiplication of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Multiply Node. No supported flags are currently defined. 
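+///
+/// Illustrative call (editorial sketch; the three Value IDs are assumed to have been defined
+/// earlier, with the second input possibly having broadcastable dimensions of size 1):
+///
+/// @code
+///   xnn_define_multiply2(
+///     subgraph,
+///     /*output_min=*/-INFINITY, /*output_max=*/+INFINITY,  // no clipping; INFINITY from <math.h>
+///     input1_id, input2_id, output_id, /*flags=*/0);
+/// @endcode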
+enum xnn_status xnn_define_multiply2( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Subtract Node and add it to a Subgraph. +/// +/// The Subtract Node computes elementwise subtraction of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Subtract Node. No supported flags are currently defined. +enum xnn_status xnn_define_subtract( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Divide Node and add it to a Subgraph. +/// +/// The Divide Node computes elementwise division of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Divide Node. No supported flags are currently defined. +enum xnn_status xnn_define_divide( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Maximum Node and add it to a Subgraph. +/// +/// The 2-Input Maximum Node computes elementwise maximum of two tensor inputs with numpy broadcasting rules. 
+/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Maximum Node. No supported flags are currently defined. +enum xnn_status xnn_define_maximum2( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Minimum Node and add it to a Subgraph. +/// +/// The 2-Input Minimum Node computes elementwise minimum of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Minimum Node. No supported flags are currently defined. +enum xnn_status xnn_define_minimum2( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Squared Difference Node and add it to a Subgraph. +/// +/// The Squared Difference Node computes elementwise squared difference of two tensor inputs with numpy broadcasting +/// rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. 
The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Squared Difference Node. No supported flags are currently defined. +enum xnn_status xnn_define_squared_difference( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Constant Pad Node with static padding specification and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param pre_paddings - number of padding elements to insert before input elements for every dimension. This array +/// must have as many elements as the number of dimensions in the input tensor. +/// @param post_paddings - number of padding elements to insert after input elements for every dimension. This array +/// must have as many elements as the number of dimensions in the input tensor. +/// @param padding_value - constant value used to initialize padding elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor with padding. +/// @param flags - binary features of the Constant Pad Node. No supported flags are currently defined. +enum xnn_status xnn_define_static_constant_pad( + xnn_subgraph_t subgraph, + const size_t* pre_paddings, + const size_t* post_paddings, + float padding_value, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Concatenate Node and add it to a Subgraph. +/// +/// The 2-Input Concatenate Node concatenates two tensors along a specified axis. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param axis - the axis to concatenate the two input tensors along +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// second input. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// first input. +/// @param output_id - Value ID for the output tensor. The output tensor must be a N-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the dimension of both inputs, except the axis +/// dimension, where it is the sum of the corresponding dimensions of both inputs. +/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined. +enum xnn_status xnn_define_concatenate2( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 3-Input Concatenate Node and add it to a Subgraph. 
+/// +/// The 3-Input Concatenate Node concatenates three tensors along a specified axis. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param axis - the axis to concatenate the three input tensors along +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param output_id - Value ID for the output tensor. The output tensor must be a N-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis +/// dimension, where it is the sum of the corresponding dimensions of all inputs. +/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined. +enum xnn_status xnn_define_concatenate3( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 4-Input Concatenate Node and add it to a Subgraph. +/// +/// The 4-Input Concatenate Node concatenates four tensors along a specified axis. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param axis - the axis to concatenate the four input tensors along +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param input4_id - Value ID for the fourth input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the +/// other inputs. +/// @param output_id - Value ID for the output tensor. The output tensor must be a N-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis +/// dimension, where it is the sum of the corresponding dimensions of all inputs. +/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined. +enum xnn_status xnn_define_concatenate4( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t input4_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Copy Node and add it to a Subgraph. 
+/// +/// The Copy Node copies an input tensor to an output tensor. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the first input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Copy Node. No supported flags are currently defined. +enum xnn_status xnn_define_copy( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Output Split Node and add it to a Subgraph. +/// +/// The 2-Output Split Node splits an input tensor into two output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the second output. The split_dim dimension is half of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the first output. The split_dim dimension is half of the input's split_dim. +/// @param flags - binary features of the Split Node. No supported flags are currently defined. +enum xnn_status xnn_define_even_split2( + xnn_subgraph_t subgraph, + size_t split_dim, + uint32_t input_id, + uint32_t output1_id, + uint32_t output2_id, + uint32_t flags); + +/// Define a 3-Output Split Node and add it to a Subgraph. +/// +/// The 3-Output Split Node splits an input tensor into three output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the second and third output. The split_dim dimension is one third of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the first and third output. The split_dim dimension is one third of the input's +/// split_dim. +/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the second and third output. The split_dim dimension is one third of the input's +/// split_dim. +/// @param flags - binary features of the Split Node. No supported flags are currently defined. 
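+///
+/// Illustrative call (editorial sketch; the IDs are assumed to refer to previously defined Values,
+/// with the input's split_dim dimension divisible by 3):
+///
+/// @code
+///   // Split e.g. a [N, H, W, 96] tensor into three [N, H, W, 32] tensors along dimension 3.
+///   xnn_define_even_split3(
+///     subgraph, /*split_dim=*/3,
+///     input_id, output1_id, output2_id, output3_id, /*flags=*/0);
+/// @endcode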
+enum xnn_status xnn_define_even_split3( + xnn_subgraph_t subgraph, + size_t split_dim, + uint32_t input_id, + uint32_t output1_id, + uint32_t output2_id, + uint32_t output3_id, + uint32_t flags); + +/// Define a 4-Output Split Node and add it to a Subgraph. +/// +/// The 4-Output Split Node splits an input tensor into four output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the other output tensors. The split_dim dimension is one fourth of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's +/// split_dim. +/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's +/// split_dim. +/// @param output4_id - Value ID for the fourth output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's +/// split_dim. +/// @param flags - binary features of the Split Node. No supported flags are currently defined. +enum xnn_status xnn_define_even_split4( + xnn_subgraph_t subgraph, + size_t split_dim, + uint32_t input_id, + uint32_t output1_id, + uint32_t output2_id, + uint32_t output3_id, + uint32_t output4_id, + uint32_t flags); + +/// Define a Reshape Node with static shape specification and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param num_dims - number of shape dimensions in the output tensor. +/// @param new_shape - shape dimensions of the output tensor. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor with padding. +/// @param flags - binary features of the Reshape Node. No supported flags are currently defined. +enum xnn_status xnn_define_static_reshape( + xnn_subgraph_t subgraph, + size_t num_dims, + const size_t* new_shape, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Resize Bilinear Node with static output height & width specification and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param new_height - height dimension of the output tensor. +/// @param new_width - width dimension of the output tensor. +/// @param input_id - Value ID for the input tensor. 
The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, H, W, C] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, new_height, new_width, C] dimensions. +/// @param flags - binary features of the 2D Resize Bilinear Node. The only currently supported values are +/// XNN_FLAG_TENSORFLOW_LEGACY_MODE and XNN_FLAG_ALIGN_CORNERS, which are mutually exclusive. +enum xnn_status xnn_define_static_resize_bilinear_2d( + xnn_subgraph_t subgraph, + size_t new_height, + size_t new_width, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a PReLU (Parametric ReLU) Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, H, W, channels] dimensions. +/// @param slope_id - Value ID for the slope tensor. The slope tensor must be a 1D tensor defined in the @a subgraph with +/// [channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, H, W, channels] dimensions. +/// @param flags - binary features of the PReLU Node. No supported flags are currently defined. +enum xnn_status xnn_define_prelu( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t slope_id, + uint32_t output_id, + uint32_t flags); + +/// Define an Abs Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Abs Node. No supported flags are currently defined. +enum xnn_status xnn_define_abs( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Bankers' Rounding Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Bankers' Rounding Node. No supported flags are currently defined. +enum xnn_status xnn_define_bankers_rounding( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Ceiling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Ceiling Node. No supported flags are currently defined. +enum xnn_status xnn_define_ceiling( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Clamp Node and add it to a Subgraph.
+/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Clamp Node. No supported flags are currently defined. +enum xnn_status xnn_define_clamp( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define an ELU (Exponential Linear Unit) Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param alpha - scale factor for negative output elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the ELU Node. No supported flags are currently defined. +enum xnn_status xnn_define_elu( + xnn_subgraph_t subgraph, + float alpha, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Floor Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Floor Node. No supported flags are currently defined. +enum xnn_status xnn_define_floor( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a HardSwish Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the HardSwish Node. No supported flags are currently defined. +enum xnn_status xnn_define_hardswish( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Leaky ReLU Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param negative_slope - scale factor for negative input elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Leaky ReLU Node. No supported flags are currently defined. +enum xnn_status xnn_define_leaky_relu( + xnn_subgraph_t subgraph, + float negative_slope, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Negate Node and add it to a Subgraph. 
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Negate Node. No supported flags are currently defined.
+enum xnn_status xnn_define_negate(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Sigmoid Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Sigmoid Node. No supported flags are currently defined.
+enum xnn_status xnn_define_sigmoid(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a SoftMax Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph, and have at
+/// least one dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the SoftMax Node. No supported flags are currently defined.
+enum xnn_status xnn_define_softmax(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Space To Depth 2D Node and add it to a Subgraph.
+///
+/// The Space To Depth 2D Node rearranges blocks of spatial data into the channel dimension (a reverse transform to
+/// Depth To Space 2D). For each output pixel, the values of the corresponding @a block_size x @a block_size square of
+/// input pixels are stacked along the channel dimension. The output depth is therefore @a block_size x @a block_size
+/// times greater than that of the input.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param block_size - the size of the spatial block.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH * block_size, IW * block_size, OC] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH, IW, OC * block_size * block_size] dimensions.
+/// @param flags - binary features of the Space To Depth 2D Node. No supported flags are currently defined.
+enum xnn_status xnn_define_space_to_depth_2d(
+  xnn_subgraph_t subgraph,
+  uint32_t block_size,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Square Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Square Node. No supported flags are currently defined.
+enum xnn_status xnn_define_square(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Square Root Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Square Root Node. No supported flags are currently defined.
+enum xnn_status xnn_define_square_root(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Static Slice Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param num_dims - number of shape dimensions in the input and output tensor.
+/// @param offsets - offsets in each dimension of the input tensor. This array must have @a num_dims elements.
+/// @param sizes - size of each dimension in the output tensor. This array must have @a num_dims elements.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// dimensions must match @a sizes.
+/// @param flags - binary features of the Static Slice Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_slice(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* offsets,
+  const size_t* sizes,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Static Transpose Node and add it to a Subgraph.
+///
+/// The Static Transpose Node applies a generalized transpose to the input tensor using the permutation in perm.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to its corresponding permuted input dimension.
+/// @param num_dims - the number of permutation dimensions. This must be equal to the number of input dimensions.
+/// @param perm - The permutation of the axes of the input tensor. The perm array must contain 0 to N-1 in the
+/// permuted order.
+/// @param flags - binary features of the Static Transpose Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_transpose(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* perm,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Weights cache is a cache for packed weights. It can be reused between runtimes.
+typedef struct xnn_weights_cache* xnn_weights_cache_t;
+
+enum xnn_status xnn_create_weights_cache(xnn_weights_cache_t* weights_cache_out);
+
+/// Create a weights cache object specifying the initial size of the weights cache (in bytes).
+/// @param size - initial capacity of the weights cache (in bytes), i.e. it can hold size bytes without growing.
+/// @param weights_cache_out - pointer to the variable that will be initialized to a handle to the weights cache object
+/// upon successful return. Once created, the weights cache object can be shared between
+/// different Runtime objects.
+enum xnn_status xnn_create_weights_cache_with_size(size_t size, xnn_weights_cache_t* weights_cache_out);
+
+
+/// Weights cache can be finalized in these ways:
+enum xnn_weights_cache_finalization_kind {
+  /// Weights cache is finalized; no insert operations into the weights cache are allowed, even if the "inserted"
+  /// weights already exist in the cache. Weights cache memory will also be trimmed to page boundary and set to
+  /// read-only (to prevent writes).
+  xnn_weights_cache_finalization_kind_hard,
+  /// Weights cache will be finalized with some extra space at the end; this allows for "inserting" into the cache only
+  /// if the weights are already in the cache, and errors on inserting uncached weights. There is memory overhead.
+  xnn_weights_cache_finalization_kind_soft,
+};
+
+/// Finalizes the weights cache. The kind of finalization is specified by `finalization_kind`.
+/// @param weights_cache - the weights cache object to finalize.
+/// @param finalization_kind - the kind of finalization.
+enum xnn_status xnn_finalize_weights_cache(
+  xnn_weights_cache_t weights_cache,
+  enum xnn_weights_cache_finalization_kind finalization_kind);
+
+/// Destroy a weights cache object, as well as memory used for the cache.
+/// @param weights_cache - the weights cache object to destroy.
+enum xnn_status xnn_delete_weights_cache(xnn_weights_cache_t weights_cache);
+
+typedef struct xnn_workspace* xnn_workspace_t;
+
+/// Create a workspace object.
+/// @param workspace_out - pointer to the variable that will be initialized to a handle to the workspace object upon
+/// successful return. Once created, the workspace can be shared between different Runtime
+/// objects.
+enum xnn_status xnn_create_workspace(xnn_workspace_t* workspace_out);
+/// Destroy a workspace object, as well as memory used by the workspace. Object destruction can be deferred until all
+/// Runtime objects created with this workspace are destroyed.
+/// @param workspace - the workspace object to destroy.
+enum xnn_status xnn_release_workspace(xnn_workspace_t workspace);
+
+/// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values.
+typedef struct xnn_runtime* xnn_runtime_t;
+
+enum xnn_profile_info {
+  /// Returns a size_t containing the number of operators.
+  xnn_profile_info_num_operators,
+  /// Returns a char[] containing the null character separated names of all operators.
+  xnn_profile_info_operator_name,
+  /// Returns a uint64_t[] with the runtimes of all operators in the same order as xnn_profile_info_operator_name.
+  xnn_profile_info_operator_timing,
+};
+
+/// Return profile information for all operators.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime, @ref xnn_create_runtime_v2 or
+/// @ref xnn_create_runtime_v3.
+/// @param param_name - type of profile information required.
+/// @param param_value_size - the size in bytes of memory pointed to by param_value. If this is not sufficient then
+/// param_value_size_ret will be set to the required size and xnn_status_out_of_memory will be
+/// returned.
+/// @param param_value - a pointer to the memory location where appropriate values for a given param_name will be written.
+/// @param param_value_size_ret - returns number of bytes required to write the result if param_value_size is not +/// sufficient. +enum xnn_status xnn_get_runtime_profiling_info(xnn_runtime_t runtime, + enum xnn_profile_info param_name, + size_t param_value_size, + void* param_value, + size_t* param_value_size_ret); + +/// Create a Runtime object from a subgraph. +/// +/// @param subgraph - a Subgraph object with all Values and Nodes that would be handled by the runtime. No Values or +/// Nodes can be added to the runtime once it is constructed. +/// @param weights_cache - a cache for packed weights. The runtime will look up and reuse packed weights in this cache, +/// this will reduce memory allocated for packed weights. +/// @param workspace - a workspace to hold internal tensors. The runtime will allocate space used for internal tensors +/// and track them using workspace. Workspace can be shared and reused across different runtimes. If +/// workspace is NULL, there will be no sharing: each runtime has its own workspace. +/// @param threadpool - the thread pool to be used for parallelisation of computations in the runtime. If the thread +/// pool is NULL, the computation would run on the caller thread without parallelization. +/// @param flags - binary features of the runtime. The only currently supported values are +/// XNN_FLAG_HINT_SPARSE_INFERENCE, XNN_FLAG_HINT_FP16_INFERENCE, XNN_FLAG_FORCE_FP16_INFERENCE, and +/// XNN_FLAG_YIELD_WORKERS. If XNN_FLAG_YIELD_WORKERS is specified, worker threads would be yielded to +/// the system scheduler after processing the last operator in the Runtime. +/// @param runtime_out - pointer to the variable that will be initialized with a handle to the Runtime object upon +/// successful return. Once constructed, the Runtime object is independent of the Subgraph object +/// used to create it. +enum xnn_status xnn_create_runtime_v4( + xnn_subgraph_t subgraph, + xnn_weights_cache_t weights_cache, + xnn_workspace_t workspace, + pthreadpool_t threadpool, + uint32_t flags, + xnn_runtime_t* runtime_out); + +enum xnn_status xnn_create_runtime_v3( + xnn_subgraph_t subgraph, + xnn_weights_cache_t weights_cache, + pthreadpool_t threadpool, + uint32_t flags, + xnn_runtime_t* runtime_out); + +enum xnn_status xnn_create_runtime_v2( + xnn_subgraph_t subgraph, + pthreadpool_t threadpool, + uint32_t flags, + xnn_runtime_t* runtime_out); + +enum xnn_status xnn_create_runtime( + xnn_subgraph_t subgraph, + xnn_runtime_t* runtime_out); + +struct xnn_external_value { + uint32_t id; + void* data; +}; + +/// Setup data pointers for external inputs and outputs in a Runtime object. +/// +/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2. +/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must +/// match the number of external inputs and outputs in the runtime, i.e. all external +/// inputs and outputs in the runtime must be specified in one call. +/// @param external_values - array with location information for all external inputs and outputs in the runtime. +enum xnn_status xnn_setup_runtime( + xnn_runtime_t runtime, + size_t num_external_values, + const struct xnn_external_value* external_values); + +/// Execute forward pass for all operators in the runtime. +/// +/// @param runtime - the Runtime object with the execution plan to invoke. 
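// ---------------------------------------------------------------------------------------------------------
// Editor's example (an illustrative sketch, not part of this header): typical use of the Runtime, weights
// cache and workspace APIs declared above. It assumes `subgraph` was already built with the xnn_define_*
// functions earlier in this header, that its external input and output tensors were assigned Value IDs 0
// and 1, that XNNPACK was initialized with xnn_initialize(), and that `input_buffer`/`output_buffer` are
// application-owned arrays of the right size. The 1 MiB initial cache size and the point at which the cache
// is finalized are assumptions rather than requirements stated by this header; error handling is abbreviated.
//
//   xnn_weights_cache_t weights_cache = NULL;
//   xnn_workspace_t workspace = NULL;
//   xnn_runtime_t runtime = NULL;
//
//   // Packed weights produced while the runtime is built are stored in this cache.
//   xnn_create_weights_cache_with_size((size_t) 1 << 20, &weights_cache);
//   // Internal tensors of the runtime are tracked in this (shareable) workspace.
//   xnn_create_workspace(&workspace);
//
//   // NULL threadpool: the computation runs on the caller thread, as documented above.
//   enum xnn_status status = xnn_create_runtime_v4(
//       subgraph, weights_cache, workspace, /*threadpool=*/NULL, /*flags=*/0, &runtime);
//   if (status != xnn_status_success) {
//     // handle the error
//   }
//
//   // No further runtimes will be created from this cache, so finalize it "hard": the cache becomes
//   // read-only and its memory is trimmed to a page boundary.
//   xnn_finalize_weights_cache(weights_cache, xnn_weights_cache_finalization_kind_hard);
//
//   // Attach application buffers to the external Values; all external IDs must be listed in one call.
//   struct xnn_external_value io[2] = {
//     {0, input_buffer},
//     {1, output_buffer},
//   };
//   xnn_setup_runtime(runtime, 2, io);
//
//   // Execute the whole execution plan.
//   xnn_invoke_runtime(runtime);
//
//   // Teardown: the runtime first, then the shared cache and workspace.
//   xnn_delete_runtime(runtime);
//   xnn_delete_weights_cache(weights_cache);
//   xnn_release_workspace(workspace);
// ---------------------------------------------------------------------------------------------------------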
+enum xnn_status xnn_invoke_runtime( + xnn_runtime_t runtime); + +/// Destroy a Runtime object, as well as operators and memory associated with it. +/// +/// @param runtime - the Runtime object to destroy. +enum xnn_status xnn_delete_runtime( + xnn_runtime_t runtime); + +typedef struct xnn_operator* xnn_operator_t; + +enum xnn_status xnn_run_operator( + xnn_operator_t op, + pthreadpool_t threadpool); + +enum xnn_status xnn_delete_operator( + xnn_operator_t op); + +#ifndef XNN_NO_F32_OPERATORS + +enum xnn_status xnn_create_abs_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* abs_op_out); + +enum xnn_status xnn_setup_abs_nc_f32( + xnn_operator_t abs_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_abs_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_setup_add_nd_f32( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_add_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_argmax_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* argmax_pooling_op_out); + +enum xnn_status xnn_setup_argmax_pooling2d_nhwc_f32( + xnn_operator_t argmax_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const float* input, + float* output, + uint32_t* index, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_average_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_f32( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_bankers_rounding_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* rounding_op_out); + +enum xnn_status xnn_setup_bankers_rounding_nc_f32( + xnn_operator_t rounding_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_bankers_rounding_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + 
float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_ceiling_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* ceiling_op_out); + +enum xnn_status xnn_run_ceiling_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_ceiling_nc_f32( + xnn_operator_t ceiling_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_clamp_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_setup_clamp_nc_f32( + xnn_operator_t clamp_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_clamp_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +typedef const struct xnn_caches* xnn_caches_t; + +enum xnn_status xnn_create_convolution2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +// Forward declare. +struct xnn_post_operation; + +/// Create a convolution operator with a number of post operations. The +/// convolution operator created using this function does not have output_min +/// and output_max. The list of operators in post_operations will be applied in +/// order. Convolution with post operations is only supported on JIT platforms +/// and when JIT is enabled. 
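// ---------------------------------------------------------------------------------------------------------
// Editor's example (an illustrative sketch, not part of this header): the create/setup/run/delete lifecycle
// shared by the operator-level API in this section, shown for the f32 Clamp operator declared above. The
// batch size of 4 and channel count of 16 are arbitrary; `input` and `output` are assumed to be
// application-owned float arrays with at least 4 * 16 elements, and XNNPACK is assumed to have been
// initialized with xnn_initialize(). Error handling is abbreviated.
//
//   xnn_operator_t clamp_op = NULL;
//   // Create once: channels and strides are fixed at creation time. The tensors here are densely packed,
//   // so both strides equal the channel count.
//   xnn_create_clamp_nc_f32(
//       /*channels=*/16, /*input_stride=*/16, /*output_stride=*/16,
//       /*output_min=*/0.0f, /*output_max=*/6.0f, /*flags=*/0, &clamp_op);
//
//   // Setup binds the batch size and data pointers; it can be repeated to re-bind pointers between runs.
//   xnn_setup_clamp_nc_f32(clamp_op, /*batch_size=*/4, input, output, /*threadpool=*/NULL);
//
//   // Run executes the operator with the most recent setup; a NULL threadpool runs on the caller thread.
//   enum xnn_status status = xnn_run_operator(clamp_op, /*threadpool=*/NULL);
//   if (status != xnn_status_success) {
//     // handle the error
//   }
//
//   xnn_delete_operator(clamp_op);
//
// For one-off execution the matching xnn_run_* entry point performs the same computation without keeping an
// operator object around:
//
//   xnn_run_clamp_nc_f32(16, 16, 16, 4, input, output, 0.0f, 6.0f, /*flags=*/0, /*threadpool=*/NULL);
// ---------------------------------------------------------------------------------------------------------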
+enum xnn_status xnn_create_fused_convolution2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + size_t num_post_operations, + struct xnn_post_operation* post_operations, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nhwc_f32( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_deconvolution2d_nhwc_f32( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_f32( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_divide_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* divide_op_out); + +enum xnn_status xnn_setup_divide_nd_f32( + xnn_operator_t divide_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_divide_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_elu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + float alpha, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_setup_elu_nc_f32( + xnn_operator_t elu_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_elu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float alpha, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_floor_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* floor_op_out); + +enum xnn_status xnn_setup_floor_nc_f32( + xnn_operator_t floor_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_floor_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + 
size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_f32( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + const xnn_caches_t caches, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_f32( + xnn_operator_t fully_connected_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_global_average_pooling_nwc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_setup_global_average_pooling_nwc_f32( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_hardswish_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* hardswish_op_out); + +enum xnn_status xnn_setup_hardswish_nc_f32( + xnn_operator_t hardswish_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_hardswish_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + float negative_slope, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_setup_leaky_relu_nc_f32( + xnn_operator_t leaky_relu_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_leaky_relu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float negative_slope, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_max_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_f32( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_maximum_nd_f32( + uint32_t flags, + xnn_operator_t* maximum_op_out); + +enum xnn_status xnn_setup_maximum_nd_f32( + xnn_operator_t maximum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_maximum_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float 
output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_minimum_nd_f32( + uint32_t flags, + xnn_operator_t* minimum_op_out); + +enum xnn_status xnn_setup_minimum_nd_f32( + xnn_operator_t minimum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_minimum_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_setup_multiply_nd_f32( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_multiply_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_negate_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* negate_op_out); + +enum xnn_status xnn_setup_negate_nc_f32( + xnn_operator_t negate_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_negate_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_prelu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + const float* negative_slope, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* prelu_op_out); + +enum xnn_status xnn_setup_prelu_nc_f32( + xnn_operator_t prelu_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_f32( + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f32( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t output_height, + size_t output_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_sigmoid_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_setup_sigmoid_nc_f32( + xnn_operator_t sigmoid_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_sigmoid_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_softmax_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* 
softmax_op_out); + +enum xnn_status xnn_setup_softmax_nc_f32( + xnn_operator_t softmax_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_square_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* square_op_out); + +enum xnn_status xnn_setup_square_nc_f32( + xnn_operator_t square_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_square_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_square_root_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_setup_square_root_nc_f32( + xnn_operator_t sqrt_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_square_root_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_squared_difference_nd_f32( + uint32_t flags, + xnn_operator_t* squared_difference_op_out); + +enum xnn_status xnn_setup_squared_difference_nd_f32( + xnn_operator_t squared_difference_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_squared_difference_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_setup_subtract_nd_f32( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_subtract_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_truncation_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* truncation_op_out); + +enum xnn_status xnn_setup_truncation_nc_f32( + xnn_operator_t truncation_op, + size_t batch_size, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_truncation_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +#ifndef XNN_NO_NCHW_OPERATORS + +enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x32( + size_t output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* 
depth_to_space_op_out); + +enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x32( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convolution2d_nchw_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nchw_f32( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_global_average_pooling_ncw_f32( + size_t channels, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_setup_global_average_pooling_ncw_f32( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + const float* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_resize_bilinear2d_nchw_f32( + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_setup_resize_bilinear2d_nchw_f32( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t output_height, + size_t output_width, + const float* input, + float* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_NCHW_OPERATORS + +#endif // XNN_NO_F32_OPERATORS + +#ifndef XNN_NO_X32_OPERATORS + +enum xnn_status xnn_create_channel_shuffle_nc_x32( + size_t groups, + size_t group_channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* channel_shuffle_op_out); + +enum xnn_status xnn_setup_channel_shuffle_nc_x32( + xnn_operator_t channel_shuffle_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_constant_pad_nd_x32( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_setup_constant_pad_nd_x32( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_constant_pad_nd_x32( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_copy_nc_x32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_setup_copy_nc_x32( + xnn_operator_t copy_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_copy_nc_x32( + size_t channels, + size_t input_stride, + size_t output_stride, + 
size_t batch_size, + const uint32_t* input, + uint32_t* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_depth_to_space_nhwc_x32( + size_t output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x32( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_slice_nd_x32( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_setup_slice_nd_x32( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_slice_nd_x32( + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + const void* input, + void* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_space_to_depth_nhwc_x32( + size_t input_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x32( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x32( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_setup_transpose_nd_x32( + xnn_operator_t transpose_op, + const void* input, + void* output, + const size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_transpose_nd_x32( + const void* input, + void* output, + const size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_unpooling2d_nhwc_x32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* unpooling_op_out); + +enum xnn_status xnn_setup_unpooling2d_nhwc_x32( + xnn_operator_t unpooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + const uint32_t* index, + void* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_X32_OPERATORS + +#ifndef XNN_NO_F16_OPERATORS + +enum xnn_status xnn_create_abs_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* abs_op_out); + +enum xnn_status xnn_setup_abs_nc_f16( + xnn_operator_t abs_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_setup_add_nd_f16( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_average_pooling2d_nhwc_f16( + uint32_t 
input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_f16( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_bankers_rounding_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* rounding_op_out); + +enum xnn_status xnn_setup_bankers_rounding_nc_f16( + xnn_operator_t rounding_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_ceiling_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* ceiling_op_out); + +enum xnn_status xnn_setup_ceiling_nc_f16( + xnn_operator_t ceiling_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_clamp_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_setup_clamp_nc_f16( + xnn_operator_t clamp_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convolution2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nhwc_f16( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_deconvolution2d_nhwc_f16( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_f16( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_divide_nd_f16( + float output_min, + float output_max, + uint32_t 
flags, + xnn_operator_t* divide_op_out); + +enum xnn_status xnn_setup_divide_nd_f16( + xnn_operator_t divide_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_elu_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + float alpha, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_setup_elu_nc_f16( + xnn_operator_t elu_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_floor_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* floor_op_out); + +enum xnn_status xnn_setup_floor_nc_f16( + xnn_operator_t floor_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_f16( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_f16( + xnn_operator_t fully_connected_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_global_average_pooling_nwc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_setup_global_average_pooling_nwc_f16( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_hardswish_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* hardswish_op_out); + +enum xnn_status xnn_setup_hardswish_nc_f16( + xnn_operator_t hardswish_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + float negative_slope, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_setup_leaky_relu_nc_f16( + xnn_operator_t leaky_relu_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_max_pooling2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_f16( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_maximum_nd_f16( + uint32_t flags, + xnn_operator_t* maximum_op_out); + +enum xnn_status xnn_setup_maximum_nd_f16( + xnn_operator_t maximum_op, + size_t num_input1_dims, + const size_t* input1_shape, + 
size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_minimum_nd_f16( + uint32_t flags, + xnn_operator_t* minimum_op_out); + +enum xnn_status xnn_setup_minimum_nd_f16( + xnn_operator_t minimum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_setup_multiply_nd_f16( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_negate_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* negate_op_out); + +enum xnn_status xnn_setup_negate_nc_f16( + xnn_operator_t negate_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_prelu_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + const void* negative_slope, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* prelu_op_out); + +enum xnn_status xnn_setup_prelu_nc_f16( + xnn_operator_t prelu_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_f16( + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f16( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t output_height, + size_t output_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_sigmoid_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_setup_sigmoid_nc_f16( + xnn_operator_t sigmoid_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_softmax_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_setup_softmax_nc_f16( + xnn_operator_t softmax_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_square_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* square_op_out); + +enum xnn_status xnn_setup_square_nc_f16( + xnn_operator_t square_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_square_root_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_setup_square_root_nc_f16( + xnn_operator_t sqrt_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_squared_difference_nd_f16( + uint32_t flags, + xnn_operator_t* squared_difference_op_out); + +enum xnn_status 
xnn_setup_squared_difference_nd_f16( + xnn_operator_t squared_difference_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_setup_subtract_nd_f16( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const void* input1, + const void* input2, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_truncation_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* truncation_op_out); + +enum xnn_status xnn_setup_truncation_nc_f16( + xnn_operator_t truncation_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +#ifndef XNN_NO_NCHW_OPERATORS + +enum xnn_status xnn_create_convolution2d_nchw_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nchw_f16( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x16( + size_t output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x16( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_global_average_pooling_ncw_f16( + size_t channels, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_setup_global_average_pooling_ncw_f16( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_resize_bilinear2d_nchw_f16( + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_setup_resize_bilinear2d_nchw_f16( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t output_height, + size_t output_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_NCHW_OPERATORS + +#endif // XNN_NO_F16_OPERATORS + +#ifndef XNN_NO_X16_OPERATORS + +enum xnn_status xnn_create_constant_pad_nd_x16( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_setup_constant_pad_nd_x16( + xnn_operator_t 
constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_constant_pad_nd_x16( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_copy_nc_x16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_setup_copy_nc_x16( + xnn_operator_t copy_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_depth_to_space_nhwc_x16( + size_t output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x16( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_slice_nd_x16( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_setup_slice_nd_x16( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_space_to_depth_nhwc_x16( + size_t input_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x16( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x16( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_setup_transpose_nd_x16( + xnn_operator_t transpose_op, + const void* input, + void* output, + const size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_transpose_nd_x16( + const void* input, + void* output, + const size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +#endif // XNN_NO_X16_OPERATORS + +#ifndef XNN_NO_QC8_OPERATORS + +enum xnn_status xnn_create_convolution2d_nhwc_qc8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + int8_t input_zero_point, + float input_scale, + const float* kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nhwc_qc8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const 
int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_QC8_OPERATORS + +#ifndef XNN_NO_QS8_OPERATORS + +enum xnn_status xnn_create_add_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_setup_add_nd_qs8( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_add_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convolution2d_nhwc_qs8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nhwc_qs8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_deconvolution2d_nhwc_qs8( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_qs8( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_elu_nc_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + float alpha, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status 
xnn_setup_elu_nc_qs8( + xnn_operator_t elu_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qs8( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qs8( + xnn_operator_t fully_connected_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_global_average_pooling_nwc_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_setup_global_average_pooling_nwc_qs8( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_setup_multiply_nd_qs8( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_multiply_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + float negative_slope, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_setup_leaky_relu_nc_qs8( + xnn_operator_t leaky_relu_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_sigmoid_nc_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_setup_sigmoid_nc_qs8( + xnn_operator_t sigmoid_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + 
xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_setup_subtract_nd_qs8( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_subtract_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_tanh_nc_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_setup_tanh_nc_qs8( + xnn_operator_t tanh_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_QS8_OPERATORS + +#ifndef XNN_NO_QU8_OPERATORS + +enum xnn_status xnn_create_add_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_setup_add_nd_qu8( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_add_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_average_pooling2d_nhwc_qu8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_qu8( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convolution2d_nhwc_qu8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t 
group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_setup_convolution2d_nhwc_qu8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_deconvolution2d_nhwc_qu8( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_qu8( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qu8( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_caches_t caches, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qu8( + xnn_operator_t fully_connected_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_global_average_pooling_nwc_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_setup_global_average_pooling_nwc_qu8( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + float negative_slope, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_setup_leaky_relu_nc_qu8( + xnn_operator_t leaky_relu_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float 
input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_setup_multiply_nd_qu8( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_multiply_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_sigmoid_nc_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_setup_sigmoid_nc_qu8( + xnn_operator_t sigmoid_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_softmax_nc_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_setup_softmax_nc_qu8( + xnn_operator_t softmax_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_setup_subtract_nd_qu8( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_subtract_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_tanh_nc_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_setup_tanh_nc_qu8( + xnn_operator_t tanh_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_QU8_OPERATORS + +#ifndef XNN_NO_S8_OPERATORS + +enum xnn_status xnn_create_clamp_nc_s8( + size_t channels, + size_t input_stride, + size_t output_stride, + int8_t output_min, + int8_t output_max, + uint32_t flags, + 
xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_setup_clamp_nc_s8( + xnn_operator_t clamp_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_max_pooling2d_nhwc_s8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_s8( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_s8( + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_s8( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t output_height, + size_t output_width, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_S8_OPERATORS + +#ifndef XNN_NO_U8_OPERATORS + +enum xnn_status xnn_create_clamp_nc_u8( + size_t channels, + size_t input_stride, + size_t output_stride, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_setup_clamp_nc_u8( + xnn_operator_t clamp_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_max_pooling2d_nhwc_u8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_u8( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_u8( + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_u8( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t output_height, + size_t output_width, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +#endif // XNN_NO_U8_OPERATORS + +#ifndef XNN_NO_X8_OPERATORS + +enum xnn_status xnn_create_copy_nc_x8( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_setup_copy_nc_x8( + xnn_operator_t copy_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_channel_shuffle_nc_x8( + size_t groups, + size_t group_channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + 
xnn_operator_t* channel_shuffle_op_out); + +enum xnn_status xnn_setup_channel_shuffle_nc_x8( + xnn_operator_t channel_shuffle_op, + size_t batch_size, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_constant_pad_nd_x8( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_setup_constant_pad_nd_x8( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_constant_pad_nd_x8( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_depth_to_space_nhwc_x8( + size_t output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x8( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_slice_nd_x8( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_setup_slice_nd_x8( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_space_to_depth_nhwc_x8( + size_t input_channels, + size_t input_channel_stride, + size_t output_channel_stride, + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x8( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + const void* input, + void* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x8( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_setup_transpose_nd_x8( + xnn_operator_t transpose_op, + const void* input, + void* output, + const size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_transpose_nd_x8( + const void* input, + void* output, + const size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +#endif // XNN_NO_X8_OPERATORS + +#ifndef XNN_NO_CVT_OPERATORS + +enum xnn_status xnn_create_convert_nc_f16_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_f16_f32( + xnn_operator_t convert_op, + size_t batch_size, + const void* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_convert_nc_f16_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const void* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_f32_f16( + xnn_operator_t convert_op, + size_t batch_size, + const float* input, + void* 
output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_convert_nc_f32_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + void* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + float output_scale, + int8_t output_zero_point, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_f32_qs8( + xnn_operator_t convert_op, + size_t batch_size, + const float* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_convert_nc_f32_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + int8_t* output, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + float output_scale, + uint8_t output_zero_point, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_f32_qu8( + xnn_operator_t convert_op, + size_t batch_size, + const float* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_convert_nc_f32_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + uint8_t* output, + float output_scale, + uint8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + float input_scale, + int8_t input_zero_point, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_qs8( + xnn_operator_t convert_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qs8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + float input_scale, + int8_t input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_qs8_f32( + xnn_operator_t convert_op, + size_t batch_size, + const int8_t* input, + float* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_run_convert_nc_qs8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const int8_t* input, + float* output, + float input_scale, + int8_t input_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + float input_scale, + uint8_t input_zero_point, + float output_scale, + uint8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_qu8( + xnn_operator_t convert_op, + size_t batch_size, + const uint8_t* input, + uint8_t* output, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qu8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + float input_scale, + uint8_t input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_setup_convert_nc_qu8_f32( + xnn_operator_t convert_op, + size_t batch_size, + const uint8_t* input, + float* output, + pthreadpool_t 
threadpool); + +enum xnn_status xnn_run_convert_nc_qu8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const uint8_t* input, + float* output, + float input_scale, + uint8_t input_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +#endif // XNN_NO_CVT_OPERATORS + +#ifdef __cplusplus +} // extern "C" +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libc10_cuda.so b/env-llmeval/lib/python3.10/site-packages/torch/lib/libc10_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..955bc8a2bf3dbe00329fe8df338fe52ae6c52360 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/lib/libc10_cuda.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so b/env-llmeval/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so new file mode 100644 index 0000000000000000000000000000000000000000..6327e64186148838900fe219ac2ca2cd69d0e3da Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/lib/libcaffe2_nvrtc.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 b/env-llmeval/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 new file mode 100644 index 0000000000000000000000000000000000000000..346f88be2766ed671dd3f9d187dedd3fcd6f9e59 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libshm.so b/env-llmeval/lib/python3.10/site-packages/torch/lib/libshm.so new file mode 100644 index 0000000000000000000000000000000000000000..2379771c3451f13a0b197433d3909bf445eb3d6c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/lib/libshm.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch.so b/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch.so new file mode 100644 index 0000000000000000000000000000000000000000..0303884bb0f5fdd9a655a0b1249107178eec38ba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so b/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so new file mode 100644 index 0000000000000000000000000000000000000000..83dc8db4fda0a3eaf07e69b8f762989b2f92a3cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/lib/libtorch_global_deps.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.py new file mode 100644 index 0000000000000000000000000000000000000000..181008252d803de6d90c31cdda37aa36cde2004b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.py @@ -0,0 +1,316 @@ +import torch +from torch import Tensor + +from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach, + _differentiable_doc, _foreach_doc, _maximize_doc, _view_as_real) +from typing import List, Optional + +__all__ = ["Adadelta", "adadelta"] + + +class Adadelta(Optimizer): + def __init__( + self, + params, + lr=1.0, + rho=0.9, + eps=1e-6, + weight_decay=0, + foreach: Optional[bool] = None, + *, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= rho <= 1.0: + raise ValueError(f"Invalid rho value: 
{rho}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict( + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + maximize=maximize, + foreach=foreach, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + + def _init_group(self, group, params_with_grad, grads, square_avgs, acc_deltas): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("Adadelta does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # Lazy state initialization + if len(state) == 0: + state["step"] = 0 + state["square_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + state["acc_delta"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + square_avgs.append(state["square_avg"]) + acc_deltas.append(state["acc_delta"]) + + state["step"] += 1 + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + square_avgs = [] + acc_deltas = [] + lr, rho, eps, weight_decay, foreach, maximize, differentiable = ( + group["lr"], + group["rho"], + group["eps"], + group["weight_decay"], + group["foreach"], + group["maximize"], + group["differentiable"], + ) + + has_complex = self._init_group(group, params_with_grad, grads, square_avgs, acc_deltas) + + adadelta( + params_with_grad, + grads, + square_avgs, + acc_deltas, + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + return loss + + +Adadelta.__doc__ = r"""Implements Adadelta algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, + \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)}, + \: \lambda \text{ (weight decay)} \\ + &\textbf{initialize} : v_0 \leftarrow 0 \: \text{ (square avg)}, + \: u_0 \leftarrow 0 \: \text{ (accumulate variables)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm} v_t \leftarrow v_{t-1} \rho + g^2_t (1 - \rho) \\ + &\hspace{5mm}\Delta x_t \leftarrow \frac{\sqrt{u_{t-1} + + \epsilon }}{ \sqrt{v_t + \epsilon} }g_t \hspace{21mm} \\ + &\hspace{5mm} u_t \leftarrow u_{t-1} \rho + + \Delta x^2_t (1 - \rho) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \Delta x_t \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_. 
+ """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + rho (float, optional): coefficient used for computing a running average + of squared gradients (default: 0.9). A higher value of `rho` will + result in a slower average, which can be helpful for preventing + oscillations in the learning process. + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-6). + lr (float, optional): coefficient that scale delta before it is applied + to the parameters (default: 1.0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + + .. _ADADELTA\: An Adaptive Learning Rate Method: + https://arxiv.org/abs/1212.5701 + + """ + + +def adadelta( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + differentiable: bool = False, + has_complex: bool = False, + *, + lr: float, + rho: float, + eps: float, + weight_decay: float, + maximize: bool, +): + r"""Functional API that performs Adadelta algorithm computation. + + See :class:`~torch.optim.Adadelta` for details. + """ + # We still respect when the user inputs False for foreach. + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adadelta + else: + func = _single_tensor_adadelta + + func( + params, + grads, + square_avgs, + acc_deltas, + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _single_tensor_adadelta( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + *, + lr: float, + rho: float, + eps: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + for (param, grad, square_avg, acc_delta) in zip( + params, grads, square_avgs, acc_deltas + ): + grad = grad if not maximize else -grad + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + square_avg = torch.view_as_real(square_avg) + acc_delta = torch.view_as_real(acc_delta) + grad = torch.view_as_real(grad) + + square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho) + std = square_avg.add(eps).sqrt_() + delta = acc_delta.add(eps).sqrt_() + if differentiable: + delta = delta.clone() + delta.div_(std).mul_(grad) + acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho) + + if torch.is_complex(param): + delta = torch.view_as_complex(delta) + param.add_(delta, alpha=-lr) + + +def _multi_tensor_adadelta( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + acc_deltas: List[Tensor], + *, + lr: float, + weight_decay: float, + rho: float, + eps: float, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + assert not differentiable, "_foreach ops don't support autograd" + + if len(params) == 0: + return + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, square_avgs, acc_deltas]) + for 
((device_params, device_grads, device_square_avgs, device_acc_deltas), _) in grouped_tensors.values(): + if maximize: + device_grads = torch._foreach_neg(device_grads) + + if has_complex: + _view_as_real(device_params, device_grads, device_square_avgs, device_acc_deltas) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay) + + torch._foreach_mul_(device_square_avgs, rho) + torch._foreach_addcmul_(device_square_avgs, device_grads, device_grads, value=1 - rho) + + std = torch._foreach_add(device_square_avgs, eps) + torch._foreach_sqrt_(std) + + deltas = torch._foreach_add(device_acc_deltas, eps) + torch._foreach_sqrt_(deltas) + torch._foreach_div_(deltas, std) + torch._foreach_mul_(deltas, device_grads) + + torch._foreach_add_(device_params, deltas, alpha=-lr) + + torch._foreach_mul_(device_acc_deltas, rho) + torch._foreach_addcmul_(device_acc_deltas, deltas, deltas, value=1 - rho) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adamax.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamax.py new file mode 100644 index 0000000000000000000000000000000000000000..c0b9362abadd8027d7e0a36e299d65e9b5b8e654 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamax.py @@ -0,0 +1,354 @@ +import torch +from torch import Tensor + +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _stack_if_compiling, + _default_to_fused_or_foreach, _differentiable_doc, _maximize_doc, _foreach_doc, + _view_as_real) +from typing import List, Optional + +__all__ = ["Adamax", "adamax"] + + +class Adamax(Optimizer): + def __init__( + self, + params, + lr=2e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + foreach: Optional[bool] = None, + *, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32) + + def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("Adamax does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: 
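+                # Lazy state initialization: buffers are allocated only for parameters
+                # that actually received a gradient. Note that, unlike Adadelta above,
+                # `step` is tracked as a float32 scalar tensor rather than a Python int.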
+ state["step"] = torch.tensor(0.0, dtype=torch.float32) + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + state["exp_inf"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_infs.append(state["exp_inf"]) + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_infs = [] + state_steps = [] + + beta1, beta2 = group["betas"] + eps = group["eps"] + lr = group["lr"] + weight_decay = group["weight_decay"] + foreach = group["foreach"] + maximize = group["maximize"] + differentiable = group["differentiable"] + + has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_infs, state_steps) + + adamax( + params_with_grad, + grads, + exp_avgs, + exp_infs, + state_steps, + eps=eps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + return loss + + +Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on infinity norm). + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}, + \: \lambda \text{ (weight decay)}, \\ + &\hspace{13mm} \epsilon \text{ (epsilon)} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + + .. 
_Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + + """ + + +def adamax( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_infs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + has_complex: bool = False, + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, +): + r"""Functional API that performs adamax algorithm computation. + + See :class:`~torch.optim.Adamax` for details. + """ + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adamax + else: + func = _single_tensor_adamax + + func( + params, + grads, + exp_avgs, + exp_infs, + state_steps, + eps=eps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _single_tensor_adamax( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_infs: List[Tensor], + state_steps: List[Tensor], + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + exp_avg = exp_avgs[i] + exp_inf = exp_infs[i] + step_t = state_steps[i] + # update step + step_t += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_inf = torch.view_as_real(exp_inf) + + # Update biased first moment estimate. + exp_avg.lerp_(grad, 1 - beta1) + # Update the exponentially weighted infinity norm. 
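+        # This implements u_t = max(beta2 * u_{t-1}, |g_t| + eps) from the docstring:
+        # both candidates are stacked along a new leading dimension and reduced with
+        # amax, writing the result back into exp_inf (an out-of-place copy is used
+        # when the computation must remain differentiable).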
+ norm_buf = torch.cat( + [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0 + ) + + if not differentiable: + torch.amax(norm_buf, 0, keepdim=False, out=exp_inf) + else: + exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False)) + + bias_correction = 1 - beta1 ** _get_value(step_t) + clr = lr / bias_correction + + param.addcdiv_(exp_avg, exp_inf, value=-clr) + + +def _multi_tensor_adamax( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_infs: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + assert not differentiable, "_foreach ops don't support autograd" + + if len(params) == 0: + return + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_infs, state_steps]) + for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs, grouped_state_steps), _) in grouped_tensors.values(): + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) + + if has_complex: + _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if weight_decay != 0: + if maximize: + # Re-use the intermediate memory (grouped_grads) already allocated for maximize + torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay) + else: + grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay) + + # Update biased first moment estimate. + torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1) + + # Update the exponentially weighted infinity norm. + torch._foreach_mul_(grouped_exp_infs, beta2) + + for exp_inf, grad in zip(grouped_exp_infs, grouped_grads): + norm_buf = torch.cat( + [exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0 + ) + torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long())) + + bias_corrections = [1 - beta1 ** _get_value(step) for step in grouped_state_steps] + clr = _stack_if_compiling([-1 * (lr / bias_correction) for bias_correction in bias_corrections]) + + torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, grouped_exp_infs, clr) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adamax.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamax.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d38cfaefe388cd703f4a816459f572216226bf4f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamax.pyi @@ -0,0 +1,13 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class Adamax(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + ) -> None: ... 
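A minimal usage sketch for the Adamax optimizer vendored above (not part of the diff itself): it relies only on the public constructor signature shown in adamax.pyi and the usual zero_grad/backward/step loop; the toy least-squares objective and all variable names are illustrative assumptions.

import torch

torch.manual_seed(0)
w = torch.randn(3, requires_grad=True)           # parameter vector to optimize
x = torch.randn(64, 3)                           # toy inputs
y = x @ torch.tensor([1.0, -2.0, 0.5])           # toy targets

opt = torch.optim.Adamax([w], lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0)
for _ in range(200):
    opt.zero_grad()
    loss = ((x @ w - y) ** 2).mean()             # mean-squared error
    loss.backward()
    opt.step()                                   # applies the infinity-norm update shown above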
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c7c0ac060881ad77b02d8197f41dd03359cee31d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.pyi @@ -0,0 +1,16 @@ +from typing import Optional + +from .optimizer import Optimizer, ParamsT + +class LBFGS(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + max_iter: int = ..., + max_eval: Optional[int] = ..., + tolerance_grad: float = ..., + tolerance_change: float = ..., + history_size: int = ..., + line_search_fn: Optional[str] = ..., + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/lr_scheduler.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/lr_scheduler.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2446c80bc547ee5257fc58edc806e911859a8d3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/lr_scheduler.pyi @@ -0,0 +1,251 @@ +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +from .optimizer import Optimizer + +class LRScheduler: + optimizer: Optimizer = ... + base_lrs: List[float] = ... + last_epoch: int = ... + verbose: bool = ... + def __init__( + self, + optimizer: Optimizer, + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + def state_dict(self) -> Dict[str, Any]: ... + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: ... + def get_last_lr(self) -> List[float]: ... + def get_lr(self) -> float: ... + def step(self, epoch: Optional[int] = ...) -> None: ... + def print_lr( + self, + is_verbose: bool, + group: Dict[str, Any], + lr: float, + epoch: Optional[int] = ..., + ) -> None: ... + +class _LRScheduler(LRScheduler): ... + +class LambdaLR(LRScheduler): + lr_lambdas: List[Callable[[int], float]] = ... + def __init__( + self, + optimizer: Optimizer, + lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class MultiplicativeLR(LRScheduler): + lr_lambdas: List[Callable[[int], float]] = ... + def __init__( + self, + optimizer: Optimizer, + lr_lambda: Union[Callable[[int], float], List[Callable[[int], float]]], + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class StepLR(LRScheduler): + step_size: int = ... + gamma: float = ... + def __init__( + self, + optimizer: Optimizer, + step_size: int, + gamma: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class MultiStepLR(LRScheduler): + milestones: Iterable[int] = ... + gamma: float = ... + def __init__( + self, + optimizer: Optimizer, + milestones: Iterable[int], + gamma: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class ConstantLR(LRScheduler): + factor: float = ... + total_iters: int = ... + def __init__( + self, + optimizer: Optimizer, + factor: float = ..., + total_iters: int = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class LinearLR(LRScheduler): + start_factor: float = ... + end_factor: float = ... + total_iters: int = ... + def __init__( + self, + optimizer: Optimizer, + start_factor: float = ..., + end_factor: float = ..., + total_iters: int = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class ExponentialLR(LRScheduler): + gamma: float = ... 
+ def __init__( + self, + optimizer: Optimizer, + gamma: float, + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class ChainedScheduler(LRScheduler): + def __init__(self, schedulers: List[LRScheduler]) -> None: ... + +class SequentialLR(LRScheduler): + def __init__( + self, + optimizer: Optimizer, + schedulers: List[LRScheduler], + milestones: List[int], + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class CosineAnnealingLR(LRScheduler): + T_max: int = ... + eta_min: float = ... + def __init__( + self, + optimizer: Optimizer, + T_max: int, + eta_min: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class ReduceLROnPlateau(LRScheduler): + factor: float = ... + optimizer: Optimizer = ... + min_lrs: List[float] = ... + patience: int = ... + verbose: bool = ... + cooldown: int = ... + cooldown_counter: int = ... + mode: str = ... + threshold: float = ... + threshold_mode: str = ... + best: Optional[float] = ... + num_bad_epochs: Optional[int] = ... + mode_worse: Optional[float] = ... + eps: float = ... + last_epoch: int = ... + def __init__( + self, + optimizer: Optimizer, + mode: str = ..., + factor: float = ..., + patience: int = ..., + threshold: float = ..., + threshold_mode: str = ..., + cooldown: int = ..., + min_lr: Union[List[float], float] = ..., + eps: float = ..., + verbose: bool = ..., + ) -> None: ... + def step(self, metrics: Any, epoch: Optional[int] = ...) -> None: ... # type: ignore[override] + @property + def in_cooldown(self) -> bool: ... + def is_better(self, a: Any, best: Any) -> bool: ... + def state_dict(self) -> Dict[str, Any]: ... + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: ... + +class CyclicLR(LRScheduler): + max_lrs: List[float] = ... + total_size: float = ... + step_ratio: float = ... + mode: str = ... + gamma: float = ... + scale_mode: str = ... + cycle_momentum: bool = ... + base_momentums: List[float] = ... + max_momentums: List[float] = ... + def __init__( + self, + optimizer: Optimizer, + base_lr: Union[float, List[float]], + max_lr: Union[float, List[float]], + step_size_up: int = ..., + step_size_down: Optional[int] = ..., + mode: str = ..., + gamma: float = ..., + scale_fn: Optional[Callable[[float], float]] = ..., + scale_mode: str = ..., + cycle_momentum: bool = ..., + base_momentum: float = ..., + max_momentum: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + def scale_fn(self, x: Any) -> float: ... + +class CosineAnnealingWarmRestarts(LRScheduler): + T_0: int = ... + T_i: int = ... + T_mult: int = ... + eta_min: float = ... + T_cur: Any = ... + def __init__( + self, + optimizer: Optimizer, + T_0: int, + T_mult: int = ..., + eta_min: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class OneCycleLR(LRScheduler): + total_steps: int = ... + anneal_func: Callable[[float, float, float], float] = ... + cycle_momentum: bool = ... + use_beta1: bool = ... + def __init__( + self, + optimizer: Optimizer, + max_lr: Union[float, List[float]], + total_steps: int = ..., + epochs: int = ..., + steps_per_epoch: int = ..., + pct_start: float = ..., + anneal_strategy: str = ..., + cycle_momentum: bool = ..., + base_momentum: Union[float, List[float]] = ..., + max_momentum: Union[float, List[float]] = ..., + div_factor: float = ..., + final_div_factor: float = ..., + three_phase: bool = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... + +class PolynomialLR(LRScheduler): + total_iters: int = ... 
+ power: float = ... + def __init__( + self, + optimizer: Optimizer, + total_iters: int = ..., + power: float = ..., + last_epoch: int = ..., + verbose: bool = ..., + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7e0f737b95579f1f42f4648b458ed768762c57 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.py @@ -0,0 +1,376 @@ +import torch +from torch import Tensor +from .optimizer import (Optimizer, _default_to_fused_or_foreach, _use_grad_for_differentiable, + _differentiable_doc, _foreach_doc, _maximize_doc, _view_as_real) +from typing import List, Optional + +__all__ = ["RMSprop", "rmsprop"] + + +class RMSprop(Optimizer): + def __init__( + self, + params, + lr=1e-2, + alpha=0.99, + eps=1e-8, + weight_decay=0, + momentum=0, + centered=False, + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= momentum: + raise ValueError(f"Invalid momentum value: {momentum}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= alpha: + raise ValueError(f"Invalid alpha value: {alpha}") + + defaults = dict( + lr=lr, + momentum=momentum, + alpha=alpha, + eps=eps, + centered=centered, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("momentum", 0) + group.setdefault("centered", False) + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + + def _init_group(self, group, params_with_grad, grads, square_avgs, momentum_buffer_list, grad_avgs): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + + if p.grad.is_sparse: + raise RuntimeError("RMSprop does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + state["square_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if group["momentum"] > 0: + state["momentum_buffer"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if group["centered"]: + state["grad_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + square_avgs.append(state["square_avg"]) + + if group["momentum"] > 0: + momentum_buffer_list.append(state["momentum_buffer"]) + if group["centered"]: + grad_avgs.append(state["grad_avg"]) + + if group["differentiable"] and isinstance(state["step"], Tensor): + raise RuntimeError("`step` can't be a tensor") + + state["step"] += 1 + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
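A minimal sketch of how a caller might drive this step()/closure contract, assuming a hypothetical toy model, data, and loss that are not part of this file:

import torch

# Hypothetical setup: a small linear model fit with MSE (illustrative only).
model = torch.nn.Linear(4, 1)
data, target = torch.randn(8, 4), torch.randn(8, 1)
optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-2)

def closure():
    # Re-evaluate the model and return the loss, as step() expects.
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(data), target)
    loss.backward()
    return loss

for _ in range(10):
    optimizer.step(closure)  # the closure argument is optional here

In this implementation the closure is called once at the top of step() to recompute gradients and obtain the returned loss.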
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + square_avgs = [] + grad_avgs = [] + momentum_buffer_list = [] + + has_complex = self._init_group(group, params_with_grad, grads, square_avgs, momentum_buffer_list, grad_avgs) + + rmsprop( + params_with_grad, + grads, + square_avgs, + grad_avgs, + momentum_buffer_list, + lr=group["lr"], + alpha=group["alpha"], + eps=group["eps"], + weight_decay=group["weight_decay"], + momentum=group["momentum"], + centered=group["centered"], + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + has_complex=has_complex, + ) + + return loss + + +RMSprop.__doc__ = r"""Implements RMSprop algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \alpha \text{ (alpha)},\: \gamma \text{ (lr)}, + \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},\: centered\\ + &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \: + \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t + \hspace{8mm} \\ + &\hspace{5mm} \tilde{v_t} \leftarrow v_t \\ + &\hspace{5mm}if \: centered \\ + &\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\ + &\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\ + &\hspace{5mm}if \: \mu > 0 \\ + &\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} + + g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\ + &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\ + &\hspace{5mm} else \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - + \gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to + `lecture notes `_ by G. Hinton. + and centered version `Generating Sequences + With Recurrent Neural Networks `_. + The implementation here takes the square root of the gradient average before + adding epsilon (note that TensorFlow interchanges these two operations). The effective + learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma` + is the scheduled learning rate and :math:`v` is the weighted moving average + of the squared gradient. 
+ """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing constant (default: 0.99) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + + """ + + +def rmsprop( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + grad_avgs: List[Tensor], + momentum_buffer_list: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + has_complex: bool = False, + *, + lr: float, + alpha: float, + eps: float, + weight_decay: float, + momentum: float, + centered: bool, +): + r"""Functional API that performs rmsprop algorithm computation. + See :class:`~torch.optim.RMSProp` for details. + """ + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_rmsprop + else: + func = _single_tensor_rmsprop + + func( + params, + grads, + square_avgs, + grad_avgs, + momentum_buffer_list, + lr=lr, + alpha=alpha, + eps=eps, + weight_decay=weight_decay, + momentum=momentum, + centered=centered, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _single_tensor_rmsprop( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + grad_avgs: List[Tensor], + momentum_buffer_list: List[Tensor], + *, + lr: float, + alpha: float, + eps: float, + weight_decay: float, + momentum: float, + centered: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + square_avg = square_avgs[i] + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + is_complex_param = torch.is_complex(param) + if is_complex_param: + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + square_avg = torch.view_as_real(square_avg) + + square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) + + if centered: + grad_avg = grad_avgs[i] + if is_complex_param: + grad_avg = torch.view_as_real(grad_avg) + grad_avg.lerp_(grad, 1 - alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_() + else: + avg = square_avg.sqrt() + + if differentiable: + avg = avg.add(eps) + else: + avg = avg.add_(eps) + + if momentum > 0: + buf = momentum_buffer_list[i] + if is_complex_param: + buf = torch.view_as_real(buf) + buf.mul_(momentum).addcdiv_(grad, avg) + param.add_(buf, alpha=-lr) + else: + param.addcdiv_(grad, avg, value=-lr) + + +def _multi_tensor_rmsprop( + params: List[Tensor], + grads: List[Tensor], + square_avgs: List[Tensor], + grad_avgs: List[Tensor], + momentum_buffer_list: List[Tensor], + *, + lr: float, + alpha: float, + eps: 
float, + weight_decay: float, + momentum: float, + centered: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, square_avgs, grad_avgs, momentum_buffer_list]) + for (((grouped_params, grouped_grads, grouped_square_avgs, grouped_grad_avgs, + grouped_momentum_buffer_list)), _) in grouped_tensors.values(): + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) + + if weight_decay != 0: + # Re-use the intermediate memory (grouped_grads) already allocated for maximize + if maximize: + torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay) + else: + grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay) + + grouped_grads = list(grouped_grads) + + if has_complex: + state_and_grads = [grouped_grads, grouped_square_avgs] + if momentum > 0: + state_and_grads.append(grouped_momentum_buffer_list) + if centered: + state_and_grads.append(grouped_grad_avgs) + _view_as_real(grouped_params, *state_and_grads) + + torch._foreach_mul_(grouped_square_avgs, alpha) + torch._foreach_addcmul_(grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha) + + if centered: + torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha) + avg = torch._foreach_addcmul(grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1) + torch._foreach_sqrt_(avg) + torch._foreach_add_(avg, eps) + else: + avg = torch._foreach_sqrt(grouped_square_avgs) + torch._foreach_add_(avg, eps) + + if momentum > 0: + torch._foreach_mul_(grouped_momentum_buffer_list, momentum) + torch._foreach_addcdiv_(grouped_momentum_buffer_list, grouped_grads, avg) + torch._foreach_add_(grouped_params, grouped_momentum_buffer_list, alpha=-lr) + else: + torch._foreach_addcdiv_(grouped_params, grouped_grads, avg, value=-lr) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/sgd.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..8c7d73b83a2b45b7bc7ea54e26ebea733d8e8eb4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/sgd.py @@ -0,0 +1,331 @@ +import torch +from torch import Tensor +from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach, + _differentiable_doc, _foreach_doc, _maximize_doc) +from typing import List, Optional + +__all__ = ['SGD', 'sgd'] + + +class SGD(Optimizer): + def __init__(self, params, lr=1e-3, momentum=0, dampening=0, + weight_decay=0, nesterov=False, *, maximize: bool = False, foreach: Optional[bool] = None, + differentiable: bool = False): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict(lr=lr, momentum=momentum, dampening=dampening, + weight_decay=weight_decay, nesterov=nesterov, + maximize=maximize, foreach=foreach, + differentiable=differentiable) + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('nesterov', False) + group.setdefault('maximize', False) + 
group.setdefault('foreach', None) + group.setdefault('differentiable', False) + + def _init_group(self, group, params_with_grad, d_p_list, momentum_buffer_list): + has_sparse_grad = False + + for p in group['params']: + if p.grad is not None: + params_with_grad.append(p) + d_p_list.append(p.grad) + if p.grad.is_sparse: + has_sparse_grad = True + + state = self.state[p] + if 'momentum_buffer' not in state: + momentum_buffer_list.append(None) + else: + momentum_buffer_list.append(state['momentum_buffer']) + + return has_sparse_grad + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + d_p_list = [] + momentum_buffer_list = [] + + has_sparse_grad = self._init_group(group, params_with_grad, d_p_list, momentum_buffer_list) + + sgd(params_with_grad, + d_p_list, + momentum_buffer_list, + weight_decay=group['weight_decay'], + momentum=group['momentum'], + lr=group['lr'], + dampening=group['dampening'], + nesterov=group['nesterov'], + maximize=group['maximize'], + has_sparse_grad=has_sparse_grad, + foreach=group['foreach']) + + # update momentum_buffers in state + for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list): + state = self.state[p] + state['momentum_buffer'] = momentum_buffer + + return loss + + +SGD.__doc__ = r"""Implements stochastic gradient descent (optionally with momentum). + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)}, + \:\textit{ nesterov,}\:\textit{ maximize} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}\textbf{if} \: \mu \neq 0 \\ + &\hspace{10mm}\textbf{if} \: t > 1 \\ + &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t \\ + &\hspace{10mm}\textbf{else} \\ + &\hspace{15mm} \textbf{b}_t \leftarrow g_t \\ + &\hspace{10mm}\textbf{if} \: \textit{nesterov} \\ + &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t \\ + &\hspace{10mm}\textbf{else} \\[-1.ex] + &\hspace{15mm} g_t \leftarrow \textbf{b}_t \\ + &\hspace{5mm}\textbf{if} \: \textit{maximize} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t \\[-1.ex] + &\hspace{5mm}\textbf{else} \\[-1.ex] + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + Nesterov momentum is based on the formula from + `On the importance of initialization and momentum in deep learning`__. 
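The momentum branch of these formulas, with the convention described further below of scaling the step (rather than the buffer) by the learning rate, can be sketched in a few lines; the helper and toy tensors are illustrative, not part of the patch:

import torch

def sgd_momentum_step_sketch(param, grad, buf, lr=0.1, momentum=0.9,
                             dampening=0.0, nesterov=False):
    # b_t = mu * b_{t-1} + (1 - tau) * g_t
    buf.mul_(momentum).add_(grad, alpha=1 - dampening)
    # Nesterov looks one buffer step ahead; plain momentum uses the buffer directly.
    d_p = grad.add(buf, alpha=momentum) if nesterov else buf
    # theta_t = theta_{t-1} - lr * d_p
    param.add_(d_p, alpha=-lr)

p, g = torch.zeros(3), torch.ones(3)
buf = g.clone()  # a buffer carried over from a previous step; the first step seeds
                 # it with the raw gradient, per the note further below
sgd_momentum_step_sketch(p, g, buf)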
+ """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + {_maximize_doc} + {_foreach_doc} + {_differentiable_doc} + """ + r""" + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + + __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf + + .. note:: + The implementation of SGD with Momentum/Nesterov subtly differs from + Sutskever et. al. and implementations in some other frameworks. + + Considering the specific case of Momentum, the update can be written as + + .. math:: + \begin{aligned} + v_{t+1} & = \mu * v_{t} + g_{t+1}, \\ + p_{t+1} & = p_{t} - \text{lr} * v_{t+1}, + \end{aligned} + + where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the + parameters, gradient, velocity, and momentum respectively. + + This is in contrast to Sutskever et. al. and + other frameworks which employ an update of the form + + .. math:: + \begin{aligned} + v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\ + p_{t+1} & = p_{t} - v_{t+1}. + \end{aligned} + + The Nesterov version is analogously modified. + + Moreover, the initial value of the momentum buffer is set to the + gradient value at the first step. This is in contrast to some other + frameworks that initialize it to all zeros. + + """ + + +def sgd(params: List[Tensor], + d_p_list: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + has_sparse_grad: bool = None, + foreach: Optional[bool] = None, + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool): + r"""Functional API that performs SGD algorithm computation. + + See :class:`~torch.optim.SGD` for details. + """ + + if foreach is None: + # why must we be explicit about an if statement for torch.jit.is_scripting here? 
+ # because JIT can't handle Optionals nor fancy conditionals when scripting + if not torch.jit.is_scripting(): + _, foreach = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False) + else: + foreach = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError('torch.jit.script not supported with foreach optimizers') + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_sgd + else: + func = _single_tensor_sgd + + func(params, + d_p_list, + momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=nesterov, + has_sparse_grad=has_sparse_grad, + maximize=maximize) + +def _single_tensor_sgd(params: List[Tensor], + d_p_list: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool, + has_sparse_grad: bool): + + for i, param in enumerate(params): + d_p = d_p_list[i] if not maximize else -d_p_list[i] + + if weight_decay != 0: + d_p = d_p.add(param, alpha=weight_decay) + + if momentum != 0: + buf = momentum_buffer_list[i] + + if buf is None: + buf = torch.clone(d_p).detach() + momentum_buffer_list[i] = buf + else: + buf.mul_(momentum).add_(d_p, alpha=1 - dampening) + + if nesterov: + d_p = d_p.add(buf, alpha=momentum) + else: + d_p = buf + + param.add_(d_p, alpha=-lr) + + +def _multi_tensor_sgd(params: List[Tensor], + grads: List[Tensor], + momentum_buffer_list: List[Optional[Tensor]], + *, + weight_decay: float, + momentum: float, + lr: float, + dampening: float, + nesterov: bool, + maximize: bool, + has_sparse_grad: bool): + + if len(params) == 0: + return + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, momentum_buffer_list], with_indices=True) + for ((device_params, device_grads, device_momentum_buffer_list), indices) in grouped_tensors.values(): + device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads) + + if maximize: + device_grads = torch._foreach_neg(device_grads) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay) + + if momentum != 0: + bufs = [] + + all_states_with_momentum_buffer = True + for i in range(len(device_momentum_buffer_list)): + if device_momentum_buffer_list[i] is None: + all_states_with_momentum_buffer = False + break + else: + bufs.append(device_momentum_buffer_list[i]) + + if all_states_with_momentum_buffer: + torch._foreach_mul_(bufs, momentum) + torch._foreach_add_(bufs, device_grads, alpha=1 - dampening) + else: + bufs = [] + for i in range(len(device_momentum_buffer_list)): + if device_momentum_buffer_list[i] is None: + buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = \ + torch.clone(device_grads[i]).detach() + else: + buf = device_momentum_buffer_list[i] + buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening) + + bufs.append(buf) + + if nesterov: + torch._foreach_add_(device_grads, bufs, alpha=momentum) + else: + device_grads = bufs + + if not device_has_sparse_grad: + torch._foreach_add_(device_params, device_grads, alpha=-lr) + else: + # foreach APIs don't support sparse + for i in range(len(device_params)): + device_params[i].add_(device_grads[i], alpha=-lr) diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..90b3f1598de914cd932a1f93eca679281cf34485 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.py @@ -0,0 +1,377 @@ +import itertools +import math +from copy import deepcopy +import warnings + +import torch +from torch.nn import Module +from torch.optim.lr_scheduler import LRScheduler +from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices + +__all__ = [ + 'AveragedModel', + 'update_bn', + 'SWALR', + 'get_ema_multi_avg_fn', + 'get_swa_multi_avg_fn', + 'get_ema_avg_fn', + 'get_swa_avg_fn' +] + +from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype + + +def get_ema_multi_avg_fn(decay=0.999): + @torch.no_grad() + def ema_update(ema_param_list, current_param_list, _): + # foreach lerp only handles float and complex + if torch.is_floating_point(ema_param_list[0]) or torch.is_complex(ema_param_list[0]): + torch._foreach_lerp_(ema_param_list, current_param_list, 1 - decay) + else: + for p_ema, p_model in zip(ema_param_list, current_param_list): + p_ema.copy_(p_ema * decay + p_model * (1 - decay)) + + return ema_update + + +def get_swa_multi_avg_fn(): + @torch.no_grad() + def swa_update(averaged_param_list, current_param_list, num_averaged): + # foreach lerp only handles float and complex + if torch.is_floating_point(averaged_param_list[0]) or torch.is_complex(averaged_param_list[0]): + torch._foreach_lerp_(averaged_param_list, current_param_list, 1 / (num_averaged + 1)) + else: + diffs = torch._foreach_sub(current_param_list, averaged_param_list) + torch._foreach_addcdiv_(averaged_param_list, diffs, [num_averaged + 1] * len(averaged_param_list)) + + return swa_update + + +def get_ema_avg_fn(decay=0.999): + @torch.no_grad() + def ema_update(ema_param, current_param, num_averaged): + return decay * ema_param + (1 - decay) * current_param + + return ema_update + + +def get_swa_avg_fn(): + @torch.no_grad() + def swa_update(averaged_param, current_param, num_averaged): + return averaged_param + (current_param - averaged_param) / (num_averaged + 1) + + return swa_update + + +class AveragedModel(Module): + r"""Implements averaged model for Stochastic Weight Averaging (SWA) and + Exponential Moving Average (EMA). + + Stochastic Weight Averaging was proposed in `Averaging Weights Leads to + Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii + Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson + (UAI 2018). + + Exponential Moving Average is a variation of `Polyak averaging`_, + but using exponential weights instead of equal weights across iterations. + + AveragedModel class creates a copy of the provided module :attr:`model` + on the device :attr:`device` and allows to compute running averages of the + parameters of the :attr:`model`. 
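With concrete numbers, the two per-parameter averaging rules defined in this file behave as follows; the scalar tensors are illustrative only:

import torch

avg, cur = torch.tensor(1.0), torch.tensor(3.0)

# EMA: decay * avg + (1 - decay) * cur -> 0.9 * 1.0 + 0.1 * 3.0 = 1.2
ema = torch.optim.swa_utils.get_ema_avg_fn(decay=0.9)(avg, cur, 0)

# SWA (equal weights): avg + (cur - avg) / (num_averaged + 1) -> 1.0 + 2.0 / 2 = 2.0
# when one model has already been folded into the average (num_averaged == 1)
swa = torch.optim.swa_utils.get_swa_avg_fn()(avg, cur, 1)

By default, update_parameters applies the multi-tensor variants of the same rules when the device supports foreach kernels and falls back to these per-parameter functions otherwise.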
+ + Args: + model (torch.nn.Module): model to use with SWA/EMA + device (torch.device, optional): if provided, the averaged model will be + stored on the :attr:`device` + avg_fn (function, optional): the averaging function used to update + parameters; the function must take in the current value of the + :class:`AveragedModel` parameter, the current value of :attr:`model` + parameter, and the number of models already averaged; if None, + an equally weighted average is used (default: None) + multi_avg_fn (function, optional): the averaging function used to update + parameters inplace; the function must take in the current values of the + :class:`AveragedModel` parameters as a list, the current values of :attr:`model` + parameters as a list, and the number of models already averaged; if None, + an equally weighted average is used (default: None) + use_buffers (bool): if ``True``, it will compute running averages for + both the parameters and the buffers of the model. (default: ``False``) + + Example: + >>> # xdoctest: +SKIP("undefined variables") + >>> loader, optimizer, model, loss_fn = ... + >>> swa_model = torch.optim.swa_utils.AveragedModel(model) + >>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, + >>> T_max=300) + >>> swa_start = 160 + >>> swa_scheduler = SWALR(optimizer, swa_lr=0.05) + >>> for i in range(300): + >>> for input, target in loader: + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + >>> if i > swa_start: + >>> swa_model.update_parameters(model) + >>> swa_scheduler.step() + >>> else: + >>> scheduler.step() + >>> + >>> # Update bn statistics for the swa_model at the end + >>> torch.optim.swa_utils.update_bn(loader, swa_model) + + You can also use custom averaging functions with the `avg_fn` or `multi_avg_fn` parameters. + If no averaging function is provided, the default is to compute + equally-weighted average of the weights (SWA). + + Example: + >>> # xdoctest: +SKIP("undefined variables") + >>> # Compute exponential moving averages of the weights and buffers + >>> ema_model = torch.optim.swa_utils.AveragedModel(model, + >>> torch.optim.swa_utils.get_ema_multi_avg_fn(0.9), use_buffers=True) + + .. note:: + When using SWA/EMA with models containing Batch Normalization you may + need to update the activation statistics for Batch Normalization. + This can be done either by using the :meth:`torch.optim.swa_utils.update_bn` + or by setting :attr:`use_buffers` to `True`. The first approach updates the + statistics in a post-training step by passing data through the model. The + second does it during the parameter update phase by averaging all buffers. + Empirical evidence has shown that updating the statistics in normalization + layers increases accuracy, but you may wish to empirically test which + approach yields the best results in your problem. + + .. note:: + :attr:`avg_fn` and `multi_avg_fn` are not saved in the :meth:`state_dict` of the model. + + .. note:: + When :meth:`update_parameters` is called for the first time (i.e. + :attr:`n_averaged` is `0`) the parameters of `model` are copied + to the parameters of :class:`AveragedModel`. For every subsequent + call of :meth:`update_parameters` the function `avg_fn` is used + to update the parameters. + + .. _Averaging Weights Leads to Wider Optima and Better Generalization: + https://arxiv.org/abs/1803.05407 + .. _There Are Many Consistent Explanations of Unlabeled Data: Why You Should + Average: + https://arxiv.org/abs/1806.05594 + .. 
_SWALP: Stochastic Weight Averaging in Low-Precision Training: + https://arxiv.org/abs/1904.11943 + .. _Stochastic Weight Averaging in Parallel: Large-Batch Training That + Generalizes Well: + https://arxiv.org/abs/2001.02312 + .. _Polyak averaging: + https://paperswithcode.com/method/polyak-averaging + """ + def __init__(self, model, device=None, avg_fn=None, multi_avg_fn=None, use_buffers=False): + super().__init__() + assert avg_fn is None or multi_avg_fn is None, 'Only one of avg_fn and multi_avg_fn should be provided' + self.module = deepcopy(model) + if device is not None: + self.module = self.module.to(device) + self.register_buffer('n_averaged', + torch.tensor(0, dtype=torch.long, device=device)) + self.avg_fn = avg_fn + self.multi_avg_fn = multi_avg_fn + self.use_buffers = use_buffers + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + def update_parameters(self, model): + self_param = ( + itertools.chain(self.module.parameters(), self.module.buffers()) + if self.use_buffers else self.parameters() + ) + model_param = ( + itertools.chain(model.parameters(), model.buffers()) + if self.use_buffers else model.parameters() + ) + self_param_detached = [] + model_param_detached = [] + for p_averaged, p_model in zip(self_param, model_param): + p_model_ = p_model.detach().to(p_averaged.device) + self_param_detached.append(p_averaged.detach()) + model_param_detached.append(p_model_) + if self.n_averaged == 0: + p_averaged.detach().copy_(p_model_) + + if self.n_averaged > 0: + if self.multi_avg_fn is not None or self.avg_fn is None: + grouped_tensors = _group_tensors_by_device_and_dtype([self_param_detached, model_param_detached]) + for ((device, _), ([self_params, model_params], _)) in grouped_tensors.items(): + if self.multi_avg_fn: + self.multi_avg_fn(self_params, model_params, self.n_averaged.to(device)) + elif device.type in _get_foreach_kernels_supported_devices(): + multi_avg_fn = get_swa_multi_avg_fn() + multi_avg_fn(self_params, model_params, self.n_averaged.to(device)) + else: + avg_fn = get_swa_avg_fn() + n_averaged = self.n_averaged.to(device) + for p_averaged, p_model in zip(self_params, model_params): + p_averaged.copy_(avg_fn(p_averaged, p_model, n_averaged)) + else: + for p_averaged, p_model in zip(self_param_detached, model_param_detached): + n_averaged = self.n_averaged.to(p_averaged.device) + p_averaged.detach().copy_(self.avg_fn(p_averaged.detach(), p_model, n_averaged)) + + if not self.use_buffers: + # If not apply running averages to the buffers, + # keep the buffers in sync with the source model. + for b_swa, b_model in zip(self.module.buffers(), model.buffers()): + b_swa.detach().copy_(b_model.detach().to(b_swa.device)) + self.n_averaged += 1 + + +@torch.no_grad() +def update_bn(loader, model, device=None): + r"""Updates BatchNorm running_mean, running_var buffers in the model. + + It performs one pass over data in `loader` to estimate the activation + statistics for BatchNorm layers in the model. + Args: + loader (torch.utils.data.DataLoader): dataset loader to compute the + activation statistics on. Each data batch should be either a + tensor, or a list/tuple whose first element is a tensor + containing data. + model (torch.nn.Module): model for which we seek to update BatchNorm + statistics. + device (torch.device, optional): If set, data will be transferred to + :attr:`device` before being passed into :attr:`model`. + + Example: + >>> # xdoctest: +SKIP("Undefined variables") + >>> loader, model = ... 
+ >>> torch.optim.swa_utils.update_bn(loader, model) + + .. note:: + The `update_bn` utility assumes that each data batch in :attr:`loader` + is either a tensor or a list or tuple of tensors; in the latter case it + is assumed that :meth:`model.forward()` should be called on the first + element of the list or tuple corresponding to the data batch. + """ + momenta = {} + for module in model.modules(): + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + module.reset_running_stats() + momenta[module] = module.momentum + + if not momenta: + return + + was_training = model.training + model.train() + for module in momenta.keys(): + module.momentum = None + + for input in loader: + if isinstance(input, (list, tuple)): + input = input[0] + if device is not None: + input = input.to(device) + + model(input) + + for bn_module in momenta.keys(): + bn_module.momentum = momenta[bn_module] + model.train(was_training) + + +class SWALR(LRScheduler): + r"""Anneals the learning rate in each parameter group to a fixed value. + + This learning rate scheduler is meant to be used with Stochastic Weight + Averaging (SWA) method (see `torch.optim.swa_utils.AveragedModel`). + + Args: + optimizer (torch.optim.Optimizer): wrapped optimizer + swa_lrs (float or list): the learning rate value for all param groups + together or separately for each group. + annealing_epochs (int): number of epochs in the annealing phase + (default: 10) + annealing_strategy (str): "cos" or "linear"; specifies the annealing + strategy: "cos" for cosine annealing, "linear" for linear annealing + (default: "cos") + last_epoch (int): the index of the last epoch (default: -1) + + The :class:`SWALR` scheduler can be used together with other + schedulers to switch to a constant learning rate late in the training + as in the example below. + + Example: + >>> # xdoctest: +SKIP("Undefined variables") + >>> loader, optimizer, model = ... + >>> lr_lambda = lambda epoch: 0.9 + >>> scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer, + >>> lr_lambda=lr_lambda) + >>> swa_scheduler = torch.optim.swa_utils.SWALR(optimizer, + >>> anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05) + >>> swa_start = 160 + >>> for i in range(300): + >>> for input, target in loader: + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + >>> if i > swa_start: + >>> swa_scheduler.step() + >>> else: + >>> scheduler.step() + + .. 
_Averaging Weights Leads to Wider Optima and Better Generalization: + https://arxiv.org/abs/1803.05407 + """ + def __init__(self, optimizer, swa_lr, anneal_epochs=10, anneal_strategy='cos', last_epoch=-1): + swa_lrs = self._format_param(optimizer, swa_lr) + for swa_lr, group in zip(swa_lrs, optimizer.param_groups): + group['swa_lr'] = swa_lr + if anneal_strategy not in ['cos', 'linear']: + raise ValueError("anneal_strategy must by one of 'cos' or 'linear', " + f"instead got {anneal_strategy}") + elif anneal_strategy == 'cos': + self.anneal_func = self._cosine_anneal + elif anneal_strategy == 'linear': + self.anneal_func = self._linear_anneal + if not isinstance(anneal_epochs, int) or anneal_epochs < 0: + raise ValueError(f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}") + self.anneal_epochs = anneal_epochs + super().__init__(optimizer, last_epoch) + + @staticmethod + def _format_param(optimizer, swa_lrs): + if isinstance(swa_lrs, (list, tuple)): + if len(swa_lrs) != len(optimizer.param_groups): + raise ValueError("swa_lr must have the same length as " + f"optimizer.param_groups: swa_lr has {len(swa_lrs)}, " + f"optimizer.param_groups has {len(optimizer.param_groups)}") + return swa_lrs + else: + return [swa_lrs] * len(optimizer.param_groups) + + @staticmethod + def _linear_anneal(t): + return t + + @staticmethod + def _cosine_anneal(t): + return (1 - math.cos(math.pi * t)) / 2 + + @staticmethod + def _get_initial_lr(lr, swa_lr, alpha): + if alpha == 1: + return swa_lr + return (lr - alpha * swa_lr) / (1 - alpha) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + step = self._step_count - 1 + if self.anneal_epochs == 0: + step = max(1, step) + prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs))) + prev_alpha = self.anneal_func(prev_t) + prev_lrs = [self._get_initial_lr(group['lr'], group['swa_lr'], prev_alpha) + for group in self.optimizer.param_groups] + t = max(0, min(1, step / max(1, self.anneal_epochs))) + alpha = self.anneal_func(t) + return [group['swa_lr'] * alpha + lr * (1 - alpha) + for group, lr in zip(self.optimizer.param_groups, prev_lrs)] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..893081694f5a3adcd30669359b1a69579ab899d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_mock.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/find_file_dependencies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/find_file_dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b74123654a0c442fe2611cd4aaa63dc5c61912b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/find_file_dependencies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/glob_group.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/glob_group.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da518d31785b6389df3462317c6756564cd03444 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/glob_group.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef7a1716af241e21f97f593abde2a2b75960814 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__init__.py @@ -0,0 +1,2 @@ +from .find_first_use_of_broken_modules import find_first_use_of_broken_modules +from .trace_dependencies import trace_dependencies diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99ed9101c30de163219e8b4b794a1462cc0f2011 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/find_first_use_of_broken_modules.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/find_first_use_of_broken_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1799b5473067c69d4d2cfb97fa35216a9e460a10 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/find_first_use_of_broken_modules.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/is_from_package.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/is_from_package.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b612a251484660ad61c1717610d3e9166f1f68be Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/is_from_package.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/trace_dependencies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/trace_dependencies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87b6383f4206f5edd7b336e49ff9b40e6d1861a2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/__pycache__/trace_dependencies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/find_first_use_of_broken_modules.py b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/find_first_use_of_broken_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..1910afdd98e34471325c087eacacfd05e98c3df1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/find_first_use_of_broken_modules.py @@ -0,0 +1,31 @@ +from typing import Dict, List + +from ..package_exporter import PackagingError + +__all__ = ["find_first_use_of_broken_modules"] + + +def find_first_use_of_broken_modules(exc: PackagingError) -> Dict[str, List[str]]: + """ + Find all broken modules in a PackagingError, and for each one, return the + dependency path in which the module was first encountered. + + E.g. broken module m.n.o was added to a dependency graph while processing a.b.c, + then re-encountered while processing d.e.f. 
This method would return + {'m.n.o': ['a', 'b', 'c']} + + Args: + exc: a PackagingError + + Returns: A dict from broken module names to lists of module names in the path. + """ + + assert isinstance(exc, PackagingError), "exception must be a PackagingError" + uses = {} + broken_module_names = [ + m for m, attr in exc.dependency_graph.nodes.items() if attr.get("error", False) + ] + for module_name in broken_module_names: + path = exc.dependency_graph.first_path(module_name) + uses[module_name] = path + return uses diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/is_from_package.py b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/is_from_package.py new file mode 100644 index 0000000000000000000000000000000000000000..82ff5896b6ffcc2dcb7b15dc169729aceb8b1d75 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/is_from_package.py @@ -0,0 +1,16 @@ +from types import ModuleType +from typing import Any + +from .._mangling import is_mangled + + +def is_from_package(obj: Any) -> bool: + """ + Return whether an object was loaded from a package. + + Note: packaged objects from externed modules will return ``False``. + """ + if type(obj) == ModuleType: + return is_mangled(obj.__name__) + else: + return is_mangled(type(obj).__module__) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/trace_dependencies.py b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/trace_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..9f882fb33481e3a81c6afa061adf507d79c92628 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/analyze/trace_dependencies.py @@ -0,0 +1,62 @@ +import sys +from typing import Any, Callable, Iterable, List, Tuple + +__all__ = ["trace_dependencies"] + + +def trace_dependencies( + callable: Callable[[Any], Any], inputs: Iterable[Tuple[Any, ...]] +) -> List[str]: + """Trace the execution of a callable in order to determine which modules it uses. + + Args: + callable: The callable to execute and trace. + inputs: The input to use during tracing. The modules used by 'callable' when invoked by each set of inputs + are union-ed to determine all modules used by the callable for the purpooses of packaging. + + Returns: A list of the names of all modules used during callable execution. + """ + modules_used = set() + + def record_used_modules(frame, event, arg): + # If the event being profiled is not a Python function + # call, there is nothing to do. + if event != "call": + return + + # This is the name of the function that was called. + name = frame.f_code.co_name + module = None + + # Try to determine the name of the module that the function + # is in: + # 1) Check the global namespace of the frame. + # 2) Check the local namespace of the frame. + # 3) To handle class instance method calls, check + # the attribute named 'name' of the object + # in the local namespace corresponding to "self". + if name in frame.f_globals: + module = frame.f_globals[name].__module__ + elif name in frame.f_locals: + module = frame.f_locals[name].__module__ + elif "self" in frame.f_locals: + method = getattr(frame.f_locals["self"], name, None) + module = method.__module__ if method else None + + # If a module was found, add it to the set of used modules. + if module: + modules_used.add(module) + + try: + # Attach record_used_modules as the profiler function. + sys.setprofile(record_used_modules) + + # Execute the callable with all inputs. 
+ for inp in inputs: + callable(*inp) + + finally: + # Detach the profiler function. + sys.setprofile(None) + + return list(modules_used)
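For context, a minimal sketch of how trace_dependencies might be invoked; the callable and input below are hypothetical:

import torch
from torch.package.analyze import trace_dependencies

# Hypothetical callable whose Python-level module usage we want to record.
def run(x):
    return torch.nn.functional.relu(x).sum()

used = trace_dependencies(run, [(torch.randn(4),)])
print(sorted(used))  # likely includes 'torch.nn.functional', among others

Only Python-level "call" events are recorded by the profiler hook, so work done entirely inside C++ operators does not contribute a module name.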