diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..debcff34b10d62208a70b9754e658219c151235c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c28f2e5e0b4bc69efb87c930c706c91cadad652c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/clog.h b/llmeval-env/lib/python3.10/site-packages/torch/include/clog.h new file mode 100644 index 0000000000000000000000000000000000000000..bec164caaabd0cd89b60afe128cb5e0f736452e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/clog.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include <stdarg.h> +#include <stdlib.h> +#include <inttypes.h> + +#define CLOG_NONE 0 +#define CLOG_FATAL 1 +#define CLOG_ERROR 2 +#define CLOG_WARNING 3 +#define CLOG_INFO 4 +#define CLOG_DEBUG 5 + +#ifndef CLOG_VISIBILITY + #if defined(__ELF__) + #define CLOG_VISIBILITY __attribute__((__visibility__("internal"))) + #elif defined(__MACH__) + #define CLOG_VISIBILITY __attribute__((__visibility__("hidden"))) + #else + #define CLOG_VISIBILITY + #endif +#endif + +#ifndef CLOG_ARGUMENTS_FORMAT + #if defined(__GNUC__) + #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2))) + #else + #define CLOG_ARGUMENTS_FORMAT + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args); + +#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_debug_function_name(const char* format, ...) { \ + if (level >= CLOG_DEBUG) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_debug(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_info_function_name(const char* format, ...) { \ + if (level >= CLOG_INFO) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_info(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_warning_function_name(const char* format, ...)
{ \ + if (level >= CLOG_WARNING) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_warning(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_error_function_name(const char* format, ...) { \ + if (level >= CLOG_ERROR) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_error(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_fatal_function_name(const char* format, ...) { \ + if (level >= CLOG_FATAL) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_fatal(module, format, args); \ + va_end(args); \ + } \ + abort(); \ + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/cpuinfo.h b/llmeval-env/lib/python3.10/site-packages/torch/include/cpuinfo.h new file mode 100644 index 0000000000000000000000000000000000000000..dfb535f1c9e25d133e98253370c917a306c57119 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/cpuinfo.h @@ -0,0 +1,1956 @@ +#pragma once +#ifndef CPUINFO_H +#define CPUINFO_H + +#ifndef __cplusplus + #include <stdbool.h> +#endif + +#ifdef __APPLE__ + #include <TargetConditionals.h> +#endif + +#include <stdint.h> + +/* Identify architecture and define corresponding macro */ + +#if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) + #define CPUINFO_ARCH_X86 1 +#endif + +#if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) + #define CPUINFO_ARCH_X86_64 1 +#endif + +#if defined(__arm__) || defined(_M_ARM) + #define CPUINFO_ARCH_ARM 1 +#endif + +#if defined(__aarch64__) || defined(_M_ARM64) + #define CPUINFO_ARCH_ARM64 1 +#endif + +#if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) + #define CPUINFO_ARCH_PPC64 1 +#endif + +#if defined(__asmjs__) + #define CPUINFO_ARCH_ASMJS 1 +#endif + +#if defined(__wasm__) + #if defined(__wasm_simd128__) + #define CPUINFO_ARCH_WASMSIMD 1 + #else + #define CPUINFO_ARCH_WASM 1 + #endif +#endif + +/* Define other architecture-specific macros as 0 */ + +#ifndef CPUINFO_ARCH_X86 + #define CPUINFO_ARCH_X86 0 +#endif + +#ifndef CPUINFO_ARCH_X86_64 + #define CPUINFO_ARCH_X86_64 0 +#endif + +#ifndef CPUINFO_ARCH_ARM + #define CPUINFO_ARCH_ARM 0 +#endif + +#ifndef CPUINFO_ARCH_ARM64 + #define CPUINFO_ARCH_ARM64 0 +#endif + +#ifndef CPUINFO_ARCH_PPC64 + #define CPUINFO_ARCH_PPC64 0 +#endif + +#ifndef CPUINFO_ARCH_ASMJS + #define CPUINFO_ARCH_ASMJS 0 +#endif + +#ifndef CPUINFO_ARCH_WASM + #define CPUINFO_ARCH_WASM 0 +#endif + +#ifndef CPUINFO_ARCH_WASMSIMD + #define CPUINFO_ARCH_WASMSIMD 0 +#endif + +#if CPUINFO_ARCH_X86 && defined(_MSC_VER) + #define CPUINFO_ABI __cdecl +#elif CPUINFO_ARCH_X86 && defined(__GNUC__) + #define CPUINFO_ABI __attribute__((__cdecl__)) +#else + #define CPUINFO_ABI +#endif + +#define CPUINFO_CACHE_UNIFIED 0x00000001 +#define CPUINFO_CACHE_INCLUSIVE 0x00000002 +#define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004 + +struct cpuinfo_cache { + /** Cache size in bytes */ + uint32_t size; + /** Number of ways of associativity */ + uint32_t associativity; + /** Number of sets */ + uint32_t sets; + /** Number of partitions */ + uint32_t partitions; + /** Line size in bytes */ + uint32_t line_size; + /** + * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing).
+ * + * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING + */ + uint32_t flags; + /** Index of the first logical processor that shares this cache */ + uint32_t processor_start; + /** Number of logical processors that share this cache */ + uint32_t processor_count; +}; + +struct cpuinfo_trace_cache { + uint32_t uops; + uint32_t associativity; +}; + +#define CPUINFO_PAGE_SIZE_4KB 0x1000 +#define CPUINFO_PAGE_SIZE_1MB 0x100000 +#define CPUINFO_PAGE_SIZE_2MB 0x200000 +#define CPUINFO_PAGE_SIZE_4MB 0x400000 +#define CPUINFO_PAGE_SIZE_16MB 0x1000000 +#define CPUINFO_PAGE_SIZE_1GB 0x40000000 + +struct cpuinfo_tlb { + uint32_t entries; + uint32_t associativity; + uint64_t pages; +}; + +/** Vendor of processor core design */ +enum cpuinfo_vendor { + /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */ + cpuinfo_vendor_unknown = 0, + + /* Active vendors of modern CPUs */ + + /** + * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures. + * + * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004. + */ + cpuinfo_vendor_intel = 1, + /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */ + cpuinfo_vendor_amd = 2, + /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_arm = 3, + /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_qualcomm = 4, + /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */ + cpuinfo_vendor_apple = 5, + /** Samsung Electronics Co., Ltd. Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_samsung = 6, + /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */ + cpuinfo_vendor_nvidia = 7, + /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_mips = 8, + /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */ + cpuinfo_vendor_ibm = 9, + /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */ + cpuinfo_vendor_ingenic = 10, + /** + * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures. + * + * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies. + */ + cpuinfo_vendor_via = 11, + /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_cavium = 12, + /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_broadcom = 13, + /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */ + cpuinfo_vendor_apm = 14, + /** + * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures. + * + * Processors are designed by HiSilicon, a subsidiary of Huawei. + */ + cpuinfo_vendor_huawei = 15, + /** + * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor of x86-64 processor microarchitectures. + * + * Processors are variants of AMD cores. + */ + cpuinfo_vendor_hygon = 16, + + /* Active vendors of embedded CPUs */ + + /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_texas_instruments = 30, + /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */ + cpuinfo_vendor_marvell = 31, + /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_rdc = 32, + /** DM&P Electronics Inc.
Vendor of x86 processor microarchitectures. */ + cpuinfo_vendor_dmp = 33, + /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */ + cpuinfo_vendor_motorola = 34, + + /* Defunct CPU vendors */ + + /** + * Transmeta Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 2004. + * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code. + */ + cpuinfo_vendor_transmeta = 50, + /** + * Cyrix Corporation. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1996. + */ + cpuinfo_vendor_cyrix = 51, + /** + * Rise Technology. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1999. + */ + cpuinfo_vendor_rise = 52, + /** + * National Semiconductor. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998. + */ + cpuinfo_vendor_nsc = 53, + /** + * Silicon Integrated Systems. Vendor of x86 processor microarchitectures. + * + * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001. + */ + cpuinfo_vendor_sis = 54, + /** + * NexGen. Vendor of x86 processor microarchitectures. + * + * Now defunct. The last processor design was released in 1994. + * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations. + */ + cpuinfo_vendor_nexgen = 55, + /** + * United Microelectronics Corporation. Vendor of x86 processor microarchitectures. + * + * Ceased x86 in the early 1990s. The last processor design was released in 1991. + * Designed U5C and U5D processors. Both are 486 level. + */ + cpuinfo_vendor_umc = 56, + /** + * Digital Equipment Corporation. Vendor of ARM processor microarchitecture. + * + * Sold its ARM designs in 1997. The last processor design was released in 1997. + */ + cpuinfo_vendor_dec = 57, +}; + +/** + * Processor microarchitecture + * + * Processors with different microarchitectures often have different instruction performance characteristics, + * and may have dramatically different pipeline organization. + */ +enum cpuinfo_uarch { + /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from OS */ + cpuinfo_uarch_unknown = 0, + + /** Pentium and Pentium MMX microarchitecture. */ + cpuinfo_uarch_p5 = 0x00100100, + /** Intel Quark microarchitecture. */ + cpuinfo_uarch_quark = 0x00100101, + + /** Pentium Pro, Pentium II, and Pentium III. */ + cpuinfo_uarch_p6 = 0x00100200, + /** Pentium M. */ + cpuinfo_uarch_dothan = 0x00100201, + /** Intel Core microarchitecture. */ + cpuinfo_uarch_yonah = 0x00100202, + /** Intel Core 2 microarchitecture on 65 nm process. */ + cpuinfo_uarch_conroe = 0x00100203, + /** Intel Core 2 microarchitecture on 45 nm process. */ + cpuinfo_uarch_penryn = 0x00100204, + /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */ + cpuinfo_uarch_nehalem = 0x00100205, + /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */ + cpuinfo_uarch_sandy_bridge = 0x00100206, + /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */ + cpuinfo_uarch_ivy_bridge = 0x00100207, + /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */ + cpuinfo_uarch_haswell = 0x00100208, + /** Intel Broadwell microarchitecture. 
*/ + cpuinfo_uarch_broadwell = 0x00100209, + /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */ + cpuinfo_uarch_sky_lake = 0x0010020A, + /** DEPRECATED (Intel Kaby Lake microarchitecture). */ + cpuinfo_uarch_kaby_lake = 0x0010020A, + /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */ + cpuinfo_uarch_palm_cove = 0x0010020B, + /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */ + cpuinfo_uarch_sunny_cove = 0x0010020C, + + /** Pentium 4 with Willamette, Northwood, or Foster cores. */ + cpuinfo_uarch_willamette = 0x00100300, + /** Pentium 4 with Prescott and later cores. */ + cpuinfo_uarch_prescott = 0x00100301, + + /** Intel Atom on 45 nm process. */ + cpuinfo_uarch_bonnell = 0x00100400, + /** Intel Atom on 32 nm process. */ + cpuinfo_uarch_saltwell = 0x00100401, + /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */ + cpuinfo_uarch_silvermont = 0x00100402, + /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */ + cpuinfo_uarch_airmont = 0x00100403, + /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */ + cpuinfo_uarch_goldmont = 0x00100404, + /** Intel Goldmont Plus microarchitecture (Gemini Lake). */ + cpuinfo_uarch_goldmont_plus = 0x00100405, + + /** Intel Knights Ferry HPC boards. */ + cpuinfo_uarch_knights_ferry = 0x00100500, + /** Intel Knights Corner HPC boards (aka Xeon Phi). */ + cpuinfo_uarch_knights_corner = 0x00100501, + /** Intel Knights Landing microarchitecture (second-gen MIC). */ + cpuinfo_uarch_knights_landing = 0x00100502, + /** Intel Knights Hill microarchitecture (third-gen MIC). */ + cpuinfo_uarch_knights_hill = 0x00100503, + /** Intel Knights Mill Xeon Phi. */ + cpuinfo_uarch_knights_mill = 0x00100504, + + /** Intel/Marvell XScale series. */ + cpuinfo_uarch_xscale = 0x00100600, + + /** AMD K5. */ + cpuinfo_uarch_k5 = 0x00200100, + /** AMD K6 and alike. */ + cpuinfo_uarch_k6 = 0x00200101, + /** AMD Athlon and Duron. */ + cpuinfo_uarch_k7 = 0x00200102, + /** AMD Athlon 64, Opteron 64. */ + cpuinfo_uarch_k8 = 0x00200103, + /** AMD Family 10h (Barcelona, Istambul, Magny-Cours). */ + cpuinfo_uarch_k10 = 0x00200104, + /** + * AMD Bulldozer microarchitecture + * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs. + */ + cpuinfo_uarch_bulldozer = 0x00200105, + /** + * AMD Piledriver microarchitecture + * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs. + */ + cpuinfo_uarch_piledriver = 0x00200106, + /** AMD Steamroller microarchitecture (Kaveri APUs). */ + cpuinfo_uarch_steamroller = 0x00200107, + /** AMD Excavator microarchitecture (Carizzo APUs). */ + cpuinfo_uarch_excavator = 0x00200108, + /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen = 0x00200109, + /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */ + cpuinfo_uarch_zen2 = 0x0020010A, + /** AMD Zen 3 microarchitecture. */ + cpuinfo_uarch_zen3 = 0x0020010B, + /** AMD Zen 4 microarchitecture. */ + cpuinfo_uarch_zen4 = 0x0020010C, + + /** NSC Geode and AMD Geode GX and LX. */ + cpuinfo_uarch_geode = 0x00200200, + /** AMD Bobcat mobile microarchitecture. */ + cpuinfo_uarch_bobcat = 0x00200201, + /** AMD Jaguar mobile microarchitecture. */ + cpuinfo_uarch_jaguar = 0x00200202, + /** AMD Puma mobile microarchitecture. */ + cpuinfo_uarch_puma = 0x00200203, + + /** ARM7 series. */ + cpuinfo_uarch_arm7 = 0x00300100, + /** ARM9 series. 
*/ + cpuinfo_uarch_arm9 = 0x00300101, + /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */ + cpuinfo_uarch_arm11 = 0x00300102, + + /** ARM Cortex-A5. */ + cpuinfo_uarch_cortex_a5 = 0x00300205, + /** ARM Cortex-A7. */ + cpuinfo_uarch_cortex_a7 = 0x00300207, + /** ARM Cortex-A8. */ + cpuinfo_uarch_cortex_a8 = 0x00300208, + /** ARM Cortex-A9. */ + cpuinfo_uarch_cortex_a9 = 0x00300209, + /** ARM Cortex-A12. */ + cpuinfo_uarch_cortex_a12 = 0x00300212, + /** ARM Cortex-A15. */ + cpuinfo_uarch_cortex_a15 = 0x00300215, + /** ARM Cortex-A17. */ + cpuinfo_uarch_cortex_a17 = 0x00300217, + + /** ARM Cortex-A32. */ + cpuinfo_uarch_cortex_a32 = 0x00300332, + /** ARM Cortex-A35. */ + cpuinfo_uarch_cortex_a35 = 0x00300335, + /** ARM Cortex-A53. */ + cpuinfo_uarch_cortex_a53 = 0x00300353, + /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */ + cpuinfo_uarch_cortex_a55r0 = 0x00300354, + /** ARM Cortex-A55. */ + cpuinfo_uarch_cortex_a55 = 0x00300355, + /** ARM Cortex-A57. */ + cpuinfo_uarch_cortex_a57 = 0x00300357, + /** ARM Cortex-A65. */ + cpuinfo_uarch_cortex_a65 = 0x00300365, + /** ARM Cortex-A72. */ + cpuinfo_uarch_cortex_a72 = 0x00300372, + /** ARM Cortex-A73. */ + cpuinfo_uarch_cortex_a73 = 0x00300373, + /** ARM Cortex-A75. */ + cpuinfo_uarch_cortex_a75 = 0x00300375, + /** ARM Cortex-A76. */ + cpuinfo_uarch_cortex_a76 = 0x00300376, + /** ARM Cortex-A77. */ + cpuinfo_uarch_cortex_a77 = 0x00300377, + /** ARM Cortex-A78. */ + cpuinfo_uarch_cortex_a78 = 0x00300378, + + /** ARM Neoverse N1. */ + cpuinfo_uarch_neoverse_n1 = 0x00300400, + /** ARM Neoverse E1. */ + cpuinfo_uarch_neoverse_e1 = 0x00300401, + /** ARM Neoverse V1. */ + cpuinfo_uarch_neoverse_v1 = 0x00300402, + /** ARM Neoverse N2. */ + cpuinfo_uarch_neoverse_n2 = 0x00300403, + /** ARM Neoverse V2. */ + cpuinfo_uarch_neoverse_v2 = 0x00300404, + + /** ARM Cortex-X1. */ + cpuinfo_uarch_cortex_x1 = 0x00300501, + /** ARM Cortex-X2. */ + cpuinfo_uarch_cortex_x2 = 0x00300502, + /** ARM Cortex-X3. */ + cpuinfo_uarch_cortex_x3 = 0x00300503, + + /** ARM Cortex-A510. */ + cpuinfo_uarch_cortex_a510 = 0x00300551, + /** ARM Cortex-A710. */ + cpuinfo_uarch_cortex_a710 = 0x00300571, + /** ARM Cortex-A715. */ + cpuinfo_uarch_cortex_a715 = 0x00300572, + + /** Qualcomm Scorpion. */ + cpuinfo_uarch_scorpion = 0x00400100, + /** Qualcomm Krait. */ + cpuinfo_uarch_krait = 0x00400101, + /** Qualcomm Kryo. */ + cpuinfo_uarch_kryo = 0x00400102, + /** Qualcomm Falkor. */ + cpuinfo_uarch_falkor = 0x00400103, + /** Qualcomm Saphira. */ + cpuinfo_uarch_saphira = 0x00400104, + + /** Nvidia Denver. */ + cpuinfo_uarch_denver = 0x00500100, + /** Nvidia Denver 2. */ + cpuinfo_uarch_denver2 = 0x00500101, + /** Nvidia Carmel. */ + cpuinfo_uarch_carmel = 0x00500102, + + /** Samsung Exynos M1 (Exynos 8890 big cores). */ + cpuinfo_uarch_exynos_m1 = 0x00600100, + /** Samsung Exynos M2 (Exynos 8895 big cores). */ + cpuinfo_uarch_exynos_m2 = 0x00600101, + /** Samsung Exynos M3 (Exynos 9810 big cores). */ + cpuinfo_uarch_exynos_m3 = 0x00600102, + /** Samsung Exynos M4 (Exynos 9820 big cores). */ + cpuinfo_uarch_exynos_m4 = 0x00600103, + /** Samsung Exynos M5 (Exynos 9830 big cores). */ + cpuinfo_uarch_exynos_m5 = 0x00600104, + + /* Deprecated synonym for Cortex-A76 */ + cpuinfo_uarch_cortex_a76ae = 0x00300376, + /* Deprecated names for Exynos. 
*/ + cpuinfo_uarch_mongoose_m1 = 0x00600100, + cpuinfo_uarch_mongoose_m2 = 0x00600101, + cpuinfo_uarch_meerkat_m3 = 0x00600102, + cpuinfo_uarch_meerkat_m4 = 0x00600103, + + /** Apple A6 and A6X processors. */ + cpuinfo_uarch_swift = 0x00700100, + /** Apple A7 processor. */ + cpuinfo_uarch_cyclone = 0x00700101, + /** Apple A8 and A8X processor. */ + cpuinfo_uarch_typhoon = 0x00700102, + /** Apple A9 and A9X processor. */ + cpuinfo_uarch_twister = 0x00700103, + /** Apple A10 and A10X processor. */ + cpuinfo_uarch_hurricane = 0x00700104, + /** Apple A11 processor (big cores). */ + cpuinfo_uarch_monsoon = 0x00700105, + /** Apple A11 processor (little cores). */ + cpuinfo_uarch_mistral = 0x00700106, + /** Apple A12 processor (big cores). */ + cpuinfo_uarch_vortex = 0x00700107, + /** Apple A12 processor (little cores). */ + cpuinfo_uarch_tempest = 0x00700108, + /** Apple A13 processor (big cores). */ + cpuinfo_uarch_lightning = 0x00700109, + /** Apple A13 processor (little cores). */ + cpuinfo_uarch_thunder = 0x0070010A, + /** Apple A14 / M1 processor (big cores). */ + cpuinfo_uarch_firestorm = 0x0070010B, + /** Apple A14 / M1 processor (little cores). */ + cpuinfo_uarch_icestorm = 0x0070010C, + /** Apple A15 / M2 processor (big cores). */ + cpuinfo_uarch_avalanche = 0x0070010D, + /** Apple A15 / M2 processor (little cores). */ + cpuinfo_uarch_blizzard = 0x0070010E, + + /** Cavium ThunderX. */ + cpuinfo_uarch_thunderx = 0x00800100, + /** Cavium ThunderX2 (originally Broadcom Vulkan). */ + cpuinfo_uarch_thunderx2 = 0x00800200, + + /** Marvell PJ4. */ + cpuinfo_uarch_pj4 = 0x00900100, + + /** Broadcom Brahma B15. */ + cpuinfo_uarch_brahma_b15 = 0x00A00100, + /** Broadcom Brahma B53. */ + cpuinfo_uarch_brahma_b53 = 0x00A00101, + + /** Applied Micro X-Gene. */ + cpuinfo_uarch_xgene = 0x00B00100, + + /* Hygon Dhyana (a modification of AMD Zen for Chinese market). */ + cpuinfo_uarch_dhyana = 0x01000100, + + /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */ + cpuinfo_uarch_taishan_v110 = 0x00C00100, +}; + +struct cpuinfo_processor { + /** SMT (hyperthread) ID within a core */ + uint32_t smt_id; + /** Core containing this logical processor */ + const struct cpuinfo_core* core; + /** Cluster of cores containing this logical processor */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this logical processor */ + const struct cpuinfo_package* package; +#if defined(__linux__) + /** + * Linux-specific ID for the logical processor: + * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu<linux_id>/ + * - Bit <linux_id> in the cpu_set_t identifies this logical processor + */ + int linux_id; +#endif +#if defined(_WIN32) || defined(__CYGWIN__) + /** Windows-specific ID for the group containing the logical processor. */ + uint16_t windows_group_id; + /** + * Windows-specific ID of the logical processor within its group: + * - Bit <windows_processor_id> in the KAFFINITY mask identifies this logical processor within its group.
+ */ + uint16_t windows_processor_id; +#endif +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** APIC ID (unique x86-specific ID of the logical processor) */ + uint32_t apic_id; +#endif + struct { + /** Level 1 instruction cache */ + const struct cpuinfo_cache* l1i; + /** Level 1 data cache */ + const struct cpuinfo_cache* l1d; + /** Level 2 unified or data cache */ + const struct cpuinfo_cache* l2; + /** Level 3 unified or data cache */ + const struct cpuinfo_cache* l3; + /** Level 4 unified or data cache */ + const struct cpuinfo_cache* l4; + } cache; +}; + +struct cpuinfo_core { + /** Index of the first logical processor on this core. */ + uint32_t processor_start; + /** Number of logical processors on this core */ + uint32_t processor_count; + /** Core ID within a package */ + uint32_t core_id; + /** Cluster containing this core */ + const struct cpuinfo_cluster* cluster; + /** Physical package containing this core. */ + const struct cpuinfo_package* package; + /** Vendor of the CPU microarchitecture for this core */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture for this core */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for this core */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for this core */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the core, in Hz */ + uint64_t frequency; +}; + +struct cpuinfo_cluster { + /** Index of the first logical processor in the cluster */ + uint32_t processor_start; + /** Number of logical processors in the cluster */ + uint32_t processor_count; + /** Index of the first core in the cluster */ + uint32_t core_start; + /** Number of cores on the cluster */ + uint32_t core_count; + /** Cluster ID within a package */ + uint32_t cluster_id; + /** Physical package containing the cluster */ + const struct cpuinfo_package* package; + /** CPU microarchitecture vendor of the cores in the cluster */ + enum cpuinfo_vendor vendor; + /** CPU microarchitecture of the cores in the cluster */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register of the cores in the cluster */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) of the cores in the cluster */ + uint32_t midr; +#endif + /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */ + uint64_t frequency; +}; + +#define CPUINFO_PACKAGE_NAME_MAX 48 + +struct cpuinfo_package { + /** SoC or processor chip model name */ + char name[CPUINFO_PACKAGE_NAME_MAX]; + /** Index of the first logical processor on this physical package */ + uint32_t processor_start; + /** Number of logical processors on this physical package */ + uint32_t processor_count; + /** Index of the first core on this physical package */ + uint32_t core_start; + /** Number of cores on this physical package */ + uint32_t core_count; + /** Index of the first cluster of cores on this physical package */ + uint32_t cluster_start; + /** Number of clusters of cores on this physical package */ + uint32_t cluster_count; +}; + +struct cpuinfo_uarch_info { + /** Type of CPU microarchitecture */ + enum cpuinfo_uarch uarch; +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /** Value of CPUID leaf 1 EAX register for the microarchitecture */ + uint32_t cpuid; +#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /** Value of Main ID Register (MIDR) for the microarchitecture */ + uint32_t midr; +#endif + /** 
Number of logical processors with the microarchitecture */ + uint32_t processor_count; + /** Number of cores with the microarchitecture */ + uint32_t core_count; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +bool CPUINFO_ABI cpuinfo_initialize(void); + +void CPUINFO_ABI cpuinfo_deinitialize(void); + +#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. */ + struct cpuinfo_x86_isa { + #if CPUINFO_ARCH_X86 + bool rdtsc; + #endif + bool rdtscp; + bool rdpid; + bool sysenter; + #if CPUINFO_ARCH_X86 + bool syscall; + #endif + bool msr; + bool clzero; + bool clflush; + bool clflushopt; + bool mwait; + bool mwaitx; + #if CPUINFO_ARCH_X86 + bool emmx; + #endif + bool fxsave; + bool xsave; + #if CPUINFO_ARCH_X86 + bool fpu; + bool mmx; + bool mmx_plus; + #endif + bool three_d_now; + bool three_d_now_plus; + #if CPUINFO_ARCH_X86 + bool three_d_now_geode; + #endif + bool prefetch; + bool prefetchw; + bool prefetchwt1; + #if CPUINFO_ARCH_X86 + bool daz; + bool sse; + bool sse2; + #endif + bool sse3; + bool ssse3; + bool sse4_1; + bool sse4_2; + bool sse4a; + bool misaligned_sse; + bool avx; + bool avxvnni; + bool fma3; + bool fma4; + bool xop; + bool f16c; + bool avx2; + bool avx512f; + bool avx512pf; + bool avx512er; + bool avx512cd; + bool avx512dq; + bool avx512bw; + bool avx512vl; + bool avx512ifma; + bool avx512vbmi; + bool avx512vbmi2; + bool avx512bitalg; + bool avx512vpopcntdq; + bool avx512vnni; + bool avx512bf16; + bool avx512fp16; + bool avx512vp2intersect; + bool avx512_4vnniw; + bool avx512_4fmaps; + bool hle; + bool rtm; + bool xtest; + bool mpx; + #if CPUINFO_ARCH_X86 + bool cmov; + bool cmpxchg8b; + #endif + bool cmpxchg16b; + bool clwb; + bool movbe; + #if CPUINFO_ARCH_X86_64 + bool lahf_sahf; + #endif + bool fs_gs_base; + bool lzcnt; + bool popcnt; + bool tbm; + bool bmi; + bool bmi2; + bool adx; + bool aes; + bool vaes; + bool pclmulqdq; + bool vpclmulqdq; + bool gfni; + bool rdrand; + bool rdseed; + bool sha; + bool rng; + bool ace; + bool ace2; + bool phe; + bool pmm; + bool lwp; + }; + + extern struct cpuinfo_x86_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_x86_rdtsc(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.rdtsc; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdtscp(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdtscp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdpid(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdpid; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clzero(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clzero; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwait(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwait; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mwaitx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mwaitx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fxsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fxsave; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xsave(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xsave; + #else + return false; + #endif +} + 
+static inline bool cpuinfo_has_x86_fpu(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.fpu; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mmx_plus(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.mmx_plus; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_plus(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.three_d_now_plus; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_3dnow_geode(void) { + #if CPUINFO_ARCH_X86_64 + return false; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return false; + #else + return cpuinfo_isa.three_d_now_geode; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetch(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetch; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_prefetchwt1(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.prefetchwt1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_daz(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.daz; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse2(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse2; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_ssse3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.ssse3; + #endif + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_1(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_1; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4_2(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.sse4_2; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.sse4_2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sse4a(void) 
{ + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sse4a; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_misaligned_sse(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.misaligned_sse; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avxvnni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avxvnni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fma3(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_fma4(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.fma4; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xop(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xop; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_f16c(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.f16c; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512f(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512f; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512pf(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512pf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512er(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512er; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512cd(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512cd; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512dq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512dq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vl(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vl; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512ifma(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512ifma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vbmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vbmi2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bitalg(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bitalg; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vpopcntdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vnni(void) { + #if CPUINFO_ARCH_X86 || 
CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vnni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512bf16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512fp16(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512vp2intersect(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512vp2intersect; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512_4vnniw(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4vnniw; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_avx512_4fmaps(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.avx512_4fmaps; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_hle(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.hle; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rtm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rtm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_xtest(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.xtest; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_mpx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.mpx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmov(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmov; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg8b(void) { + #if CPUINFO_ARCH_X86_64 + return true; + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.cmpxchg8b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_cmpxchg16b(void) { + #if CPUINFO_ARCH_X86_64 + return cpuinfo_isa.cmpxchg16b; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_clwb(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.clwb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_movbe(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.movbe; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lahf_sahf(void) { + #if CPUINFO_ARCH_X86 + return true; + #elif CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lahf_sahf; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_lzcnt(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.lzcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_popcnt(void) { + #if CPUINFO_ARCH_X86_64 + #if defined(__ANDROID__) + return true; + #else + return cpuinfo_isa.popcnt; + #endif + #elif CPUINFO_ARCH_X86 + return cpuinfo_isa.popcnt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_tbm(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.tbm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_bmi2(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.bmi2; + #else + return false; + #endif +} + +static 
inline bool cpuinfo_has_x86_adx(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.adx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_aes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vaes(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vaes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_pclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.pclmulqdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_vpclmulqdq(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.vpclmulqdq; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_gfni(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.gfni; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdrand(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdrand; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_rdseed(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.rdseed; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_x86_sha(void) { + #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 + return cpuinfo_isa.sha; + #else + return false; + #endif +} + +#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + /* This structure is not a part of stable API. Use cpuinfo_has_arm_* functions instead. */ + struct cpuinfo_arm_isa { + #if CPUINFO_ARCH_ARM + bool thumb; + bool thumb2; + bool thumbee; + bool jazelle; + bool armv5e; + bool armv6; + bool armv6k; + bool armv7; + bool armv7mp; + bool armv8; + bool idiv; + + bool vfpv2; + bool vfpv3; + bool d32; + bool fp16; + bool fma; + + bool wmmx; + bool wmmx2; + bool neon; + #endif + #if CPUINFO_ARCH_ARM64 + bool atomics; + bool bf16; + bool sve; + bool sve2; + bool i8mm; + #endif + bool rdm; + bool fp16arith; + bool dot; + bool jscvt; + bool fcma; + bool fhm; + + bool aes; + bool sha1; + bool sha2; + bool pmull; + bool crc32; + }; + + extern struct cpuinfo_arm_isa cpuinfo_isa; +#endif + +static inline bool cpuinfo_has_arm_thumb(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_thumb2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.thumb2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v5e(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv5e; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v6k(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv6k; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v7mp(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.armv7mp; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_idiv(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.idiv; + #else + return false; + #endif 
+} + +static inline bool cpuinfo_has_arm_vfpv2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_vfpv4_d32(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fp16_arith(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_wmmx2(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.wmmx2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fma(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_v8(void) { + #if CPUINFO_ARCH_ARM64 + return true; + #elif CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.armv8; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_atomics(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.atomics; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_rdm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.rdm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_fp16_arith(void) { + #if CPUINFO_ARCH_ARM + return cpuinfo_isa.neon && cpuinfo_isa.fp16arith; + #elif CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fp16arith; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fhm(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fhm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_dot(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 
+ return cpuinfo_isa.dot; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_neon_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_jscvt(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.jscvt; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_fcma(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.fcma; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_i8mm(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.i8mm; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_aes(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.aes; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha1(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha1; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sha2(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sha2; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_pmull(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.pmull; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_crc32(void) { + #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 + return cpuinfo_isa.crc32; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve_bf16(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve && cpuinfo_isa.bf16; + #else + return false; + #endif +} + +static inline bool cpuinfo_has_arm_sve2(void) { + #if CPUINFO_ARCH_ARM64 + return cpuinfo_isa.sve2; + #else + return false; + #endif +} + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void); + +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index); +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index); +const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index); +const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index); +const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index); +const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index); + +uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void); +uint32_t CPUINFO_ABI 
cpuinfo_get_clusters_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void); +uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void); + +/** + * Returns upper bound on cache size. + */ +uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void); + +/** + * Identify the logical processor that executes the current thread. + * + * There is no guarantee that the thread will stay on the same logical processor for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void); + +/** + * Identify the core that executes the current thread. + * + * There is no guarantee that the thread will stay on the same core for any time. + * Callers should treat the result as only a hint, and be prepared to handle NULL return value. + */ +const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns 0. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void); + +/** + * Identify the microarchitecture index of the core that executes the current thread. + * If the system does not support such identification, the function returns the user-specified default value. + * + * There is no guarantee that the thread will stay on the same type of core for any time. + * Callers should treat the result as only a hint. + */ +uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* CPUINFO_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl.h new file mode 100644 index 0000000000000000000000000000000000000000..bc74bf644f4b628018d7a9103ba63320abc466d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/ + +#ifndef DNNL_H +#define DNNL_H + +#include "oneapi/dnnl/dnnl.h" + +#endif /* DNNL_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_config.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_config.h new file mode 100644 index 0000000000000000000000000000000000000000..48925e1e3ab49ae135c6e9c4c501aa2f5e030913 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_config.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_CONFIG_H +#define DNNL_CONFIG_H + +#include "oneapi/dnnl/dnnl_config.h" + +#endif /* DNNL_CONFIG_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_debug.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..5044971832bbbe56127920a527508b207a803eea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_debug.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_DEBUG_H +#define DNNL_DEBUG_H + +#include "oneapi/dnnl/dnnl_debug.h" + +#endif /* DNNL_DEBUG_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_ocl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_ocl.h new file mode 100644 index 0000000000000000000000000000000000000000..ad731150b28babe7bd5a911acd8de70c57e85254 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_ocl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_OCL_H +#define DNNL_OCL_H + +#include "oneapi/dnnl/dnnl_ocl.h" + +#endif /* DNNL_OCL_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl.h new file mode 100644 index 0000000000000000000000000000000000000000..4501598c2f461021f0fa818e95fd1972ce2d3ace --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_SYCL_H +#define DNNL_SYCL_H + +#include "oneapi/dnnl/dnnl_sycl.h" + +#endif /* DNNL_SYCL_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..a4a854a4cf138103f4c53030083e119cc0732cf1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/ + +#ifndef DNNL_SYCL_TYPES_H +#define DNNL_SYCL_TYPES_H + +#include "oneapi/dnnl/dnnl_sycl_types.h" + +#endif /* DNNL_SYCL_TYPES_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..e27e584a65ed16740d4fde93da3a1a049dd111aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_THREADPOOL_H +#define DNNL_THREADPOOL_H + +#include "oneapi/dnnl/dnnl_threadpool.h" + +#endif /* DNNL_THREADPOOL_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_types.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..6f4261b712dc37ec2416ba60c0c68bb30f6995e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_types.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_TYPES_H +#define DNNL_TYPES_H + +#include "oneapi/dnnl/dnnl_types.h" + +#endif /* DNNL_TYPES_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_version.h b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_version.h new file mode 100644 index 0000000000000000000000000000000000000000..32a3d5cf839b1d593f069520febfd60b323730e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_version.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
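The dnnl_*.h headers in this group are thin compatibility shims: each one only forwards to its oneapi/dnnl/ counterpart, so code written against the legacy top-level include paths keeps compiling. A small sketch, assuming the oneDNN C API's dnnl_version() and dnnl_version_t (reached through the forwarded headers rather than declared in these shims):

#include <stdio.h>
#include <dnnl.h>   /* forwards to "oneapi/dnnl/dnnl.h" as shown above */

int main(void) {
  const dnnl_version_t* v = dnnl_version();
  printf("oneDNN runtime version: %d.%d.%d\n", v->major, v->minor, v->patch);
  return 0;
}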
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_VERSION_H +#define DNNL_VERSION_H + +#include "oneapi/dnnl/dnnl_version.h" + +#endif /* DNNL_VERSION_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/experiments-config.h b/llmeval-env/lib/python3.10/site-packages/torch/include/experiments-config.h new file mode 100644 index 0000000000000000000000000000000000000000..7c0cba4acdaef0784e7b96bfd6e755254d3eecb4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/experiments-config.h @@ -0,0 +1,25 @@ +// Copyright 2023 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct xnn_experiment_config { + bool adaptive_avx_optimization; +}; + +struct xnn_experiment_config* xnn_get_experiment_config(); + +void xnn_experiment_enable_adaptive_avx_optimization(); + + +#ifdef __cplusplus +} // extern "C" +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/fp16.h b/llmeval-env/lib/python3.10/site-packages/torch/include/fp16.h new file mode 100644 index 0000000000000000000000000000000000000000..9d7366e997dadef17922225bcbb489288f6f9cdc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/fp16.h @@ -0,0 +1,11 @@ +#pragma once +#ifndef FP16_H +#define FP16_H + +#include + +#if defined(PSIMD_H) +#include +#endif + +#endif /* FP16_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/fxdiv.h b/llmeval-env/lib/python3.10/site-packages/torch/include/fxdiv.h new file mode 100644 index 0000000000000000000000000000000000000000..2c35038d97c55c524bb97caba2e3560cab9da504 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/fxdiv.h @@ -0,0 +1,425 @@ +#pragma once +#ifndef FXDIV_H +#define FXDIV_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include + #include +#endif + +#if defined(_MSC_VER) + #include + #if defined(_M_IX86) || defined(_M_X64) + #include + #endif +#endif + +#ifndef FXDIV_USE_INLINE_ASSEMBLY + #define FXDIV_USE_INLINE_ASSEMBLY 0 +#endif + +static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) { +#if defined(_MSC_VER) && defined(_M_IX86) + return (uint64_t) __emulu((unsigned int) a, (unsigned int) b); +#else + return (uint64_t) a * (uint64_t) b; +#endif +} + +static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b); +#elif defined(_MSC_VER) && defined(_M_IX86) + return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32); +#elif defined(_MSC_VER) && defined(_M_ARM) + return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b); +#else + return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32); +#endif +} + +static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, 
uint64_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b); +#elif defined(_MSC_VER) && defined(_M_X64) + return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b); +#elif defined(__GNUC__) && defined(__SIZEOF_INT128__) + return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64); +#else + const uint32_t a_lo = (uint32_t) a; + const uint32_t a_hi = (uint32_t) (a >> 32); + const uint32_t b_lo = (uint32_t) b; + const uint32_t b_hi = (uint32_t) (b >> 32); + + const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) + + (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo); + return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) + + ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32); +#endif +} + +static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) { +#if SIZE_MAX == UINT32_MAX + return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b); +#elif SIZE_MAX == UINT64_MAX + return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b); +#else + #error Unsupported platform +#endif +} + +struct fxdiv_divisor_uint32_t { + uint32_t value; + uint32_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint32_t { + uint32_t quotient; + uint32_t remainder; +}; + +struct fxdiv_divisor_uint64_t { + uint64_t value; + uint64_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint64_t { + uint64_t quotient; + uint64_t remainder; +}; + +struct fxdiv_divisor_size_t { + size_t value; + size_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_size_t { + size_t quotient; + size_t remainder; +}; + +static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) { + struct fxdiv_divisor_uint32_t result = { d }; + if (d == 1) { + result.m = UINT32_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t l_minus_1 = 31 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t l_minus_1 = 31 - __clz((int) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse(&l_minus_1, (unsigned long) (d - 1)); + #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t l_minus_1; + __asm__("BSRL %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1); + #else + /* Based on Algorithm 2 from Hacker's delight */ + + uint32_t l_minus_1 = 0; + uint32_t x = d - 1; + uint32_t y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + #endif + uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */ + #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t q; + __asm__("DIVL %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (0) + : "cc"); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64)) + unsigned int remainder; + const uint32_t q = (uint32_t) _udiv64((unsigned 
__int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder); + #else + const uint32_t q = ((uint64_t) u_hi << 32) / d; + #endif + + result.m = q + UINT32_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) { + struct fxdiv_divisor_uint64_t result = { d }; + if (d == 1) { + result.m = UINT64_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t nlz_d = clz(d); + const uint32_t l_minus_1 = 63 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t nlz_d = __clzll((long long) d); + const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1)); + unsigned long bsr_d; + _BitScanReverse64(&bsr_d, (unsigned __int64) d); + const uint32_t nlz_d = bsr_d ^ 0x3F; + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM)) + const uint64_t d_minus_1 = d - 1; + const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0; + unsigned long l_minus_1; + if ((uint32_t) (d_minus_1 >> 32) == 0) { + _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1); + } else { + _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32)); + l_minus_1 += 32; + } + const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2; + #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t l_minus_1; + __asm__("BSRQ %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1); + const uint32_t nlz_d = __builtin_clzll(d); + #else + /* Based on Algorithm 2 from Hacker's delight */ + const uint64_t d_minus_1 = d - 1; + const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0; + uint32_t l_minus_1 = 0; + uint32_t x = (uint32_t) d_minus_1; + uint32_t y = d_minus_1 >> 32; + if (y != 0) { + l_minus_1 += 32; + x = y; + } + y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2; + #endif + uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */ + #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t q; + __asm__("DIVQ %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (UINT64_C(0)) + : "cc"); + #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__) + /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */ + const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d)); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64) + unsigned __int64 remainder; + const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder); + #else + /* Implementation based on code from Hacker's delight */ + + /* Normalize divisor and shift divident left */ + d <<= nlz_d; + u_hi <<= nlz_d; + /* Break divisor up into two 32-bit digits 
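A minimal sketch of the intended fxdiv usage pattern: precompute the magic multiplier and shifts once per divisor with fxdiv_init_uint32_t, then reuse the divisor with the quotient/remainder helpers defined further down in this header. The comparison with the plain / and % operators is only a sanity check for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <fxdiv.h>

int main(void) {
  const uint32_t d = 7;
  /* One-time setup: computes the magic multiplier m and the shifts s1/s2. */
  const struct fxdiv_divisor_uint32_t divisor = fxdiv_init_uint32_t(d);
  for (uint32_t n = 0; n < 10000; n++) {
    /* Division by multiplication: t = mulhi(n, m); q = (t + ((n - t) >> s1)) >> s2. */
    const uint32_t q = fxdiv_quotient_uint32_t(n, divisor);
    const uint32_t r = fxdiv_remainder_uint32_t(n, divisor);
    assert(q == n / d);
    assert(r == n % d);
  }
  printf("fxdiv quotient/remainder match / and %% for divisor %u\n", d);
  return 0;
}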
*/ + const uint64_t d_hi = (uint32_t) (d >> 32); + const uint32_t d_lo = (uint32_t) d; + + /* Compute the first quotient digit, q1 */ + uint64_t q1 = u_hi / d_hi; + uint64_t r1 = u_hi - q1 * d_hi; + + while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) { + q1 -= 1; + r1 += d_hi; + if ((r1 >> 32) != 0) { + break; + } + } + + /* Multiply and subtract. */ + u_hi = (u_hi << 32) - q1 * d; + + /* Compute the second quotient digit, q0 */ + uint64_t q0 = u_hi / d_hi; + uint64_t r0 = u_hi - q0 * d_hi; + + while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) { + q0 -= 1; + r0 += d_hi; + if ((r0 >> 32) != 0) { + break; + } + } + const uint64_t q = (q1 << 32) | (uint32_t) q0; + #endif + result.m = q + UINT64_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d); +#else + #error Unsupported platform +#endif + struct fxdiv_divisor_size_t size_result = { + (size_t) uint_result.value, + (size_t) uint_result.m, + uint_result.s1, + uint_result.s2 + }; + return size_result; +} + +static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint32_divisor = { + (uint32_t) divisor.value, + (uint32_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint64_divisor = { + (uint64_t) divisor.value, + (uint64_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor); +#else + #error Unsupported platform +#endif +} + +static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity); + return quotient * granularity.value; +} + +static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity); + return quotient * granularity.value; +} + +static inline size_t 
fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) { + const size_t quotient = fxdiv_quotient_size_t(n, granularity); + return quotient * granularity.value; +} + +static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + const uint32_t remainder = n - quotient * divisor.value; + struct fxdiv_result_uint32_t result = { quotient, remainder }; + return result; +} + +static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + const uint64_t remainder = n - quotient * divisor.value; + struct fxdiv_result_uint64_t result = { quotient, remainder }; + return result; +} + +static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + const size_t remainder = n - quotient * divisor.value; + struct fxdiv_result_size_t result = { quotient, remainder }; + return result; +} + +#endif /* FXDIV_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/libshm.h b/llmeval-env/lib/python3.10/site-packages/torch/include/libshm.h new file mode 100644 index 0000000000000000000000000000000000000000..28024aa2338d1f46ce280abeb92a633f89be1385 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/libshm.h @@ -0,0 +1,46 @@ +#pragma once + +#include + +#ifdef __cplusplus + +void libshm_init(const char* manager_exec_path); + +// Superclass to run a constructor before at::RefcountedMapAllocator +class THManagedMapAllocatorInit { + protected: + THManagedMapAllocatorInit(const char* manager_handle, const char* filename); + std::string manager_handle_; +}; + +// Like a at::RefcountedMapAllocator, but it also makes use of an external +// shared memory manager process to ensure that shared memory regions actually +// get freed in the end (even if processes lose the memory). +class THManagedMapAllocator : private THManagedMapAllocatorInit, + public at::RefcountedMapAllocator { + public: + THManagedMapAllocator( + const char* manager_handle, + const char* filename, + int flags, + size_t size); + + void close() override; + + ~THManagedMapAllocator() override { + close(); + } + + static at::DataPtr makeDataPtr( + const char* manager_handle, + const char* filename, + int flags, + size_t size); + static THManagedMapAllocator* fromDataPtr(const at::DataPtr&); + + const char* manager_handle() const { + return manager_handle_.c_str(); + } +}; + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/nnpack.h b/llmeval-env/lib/python3.10/site-packages/torch/include/nnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..97b5ff390076e9ab7ae91e67bfc0d78736aaeffd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/nnpack.h @@ -0,0 +1,659 @@ +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Status code for any NNPACK function call. + */ +enum nnp_status { + /** The call succeeded, and all output arguments now contain valid data. */ + nnp_status_success = 0, + /** NNPACK function was called with batch_size == 0. */ + nnp_status_invalid_batch_size = 2, + /** NNPACK function was called with channels == 0. 
*/ + nnp_status_invalid_channels = 3, + /** NNPACK function was called with input_channels == 0. */ + nnp_status_invalid_input_channels = 4, + /** NNPACK function was called with output_channels == 0. */ + nnp_status_invalid_output_channels = 5, + /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */ + nnp_status_invalid_input_size = 10, + /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */ + nnp_status_invalid_input_stride = 11, + /** NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.: + * + * - input_padding.left >= kernel_size.width (>= pooling_size.width) + * - input_padding.right >= kernel_size.width (>= pooling_size.width) + * - input_padding.top >= kernel_size.height (>= pooling_size.height) + * - input_padding.bottom >= kernel_size.height (>= pooling_size.height) + */ + nnp_status_invalid_input_padding = 12, + /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */ + nnp_status_invalid_kernel_size = 13, + /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */ + nnp_status_invalid_pooling_size = 14, + /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */ + nnp_status_invalid_pooling_stride = 15, + /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */ + nnp_status_invalid_algorithm = 16, + /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */ + nnp_status_invalid_transform_strategy = 17, + /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */ + nnp_status_invalid_output_subsampling = 13, + /** NNPACK function was called with activation not in nnp_activation enum */ + nnp_status_invalid_activation = 14, + /** NNPACK function was called with invalid activation parameters */ + nnp_status_invalid_activation_parameters = 15, + + /** NNPACK does not support the particular input size for the function */ + nnp_status_unsupported_input_size = 20, + /** NNPACK does not support the particular input stride for the function */ + nnp_status_unsupported_input_stride = 21, + /** NNPACK does not support the particular input padding for the function */ + nnp_status_unsupported_input_padding = 22, + /** NNPACK does not support the particular kernel size for the function */ + nnp_status_unsupported_kernel_size = 23, + /** NNPACK does not support the particular pooling size for the function */ + nnp_status_unsupported_pooling_size = 24, + /** NNPACK does not support the particular pooling stride for the function */ + nnp_status_unsupported_pooling_stride = 25, + /** NNPACK does not support the particular convolution algorithm for the function */ + nnp_status_unsupported_algorithm = 26, + /** NNPACK does not support the particular convolution transform strategy for the algorithm */ + nnp_status_unsupported_transform_strategy = 27, + /** NNPACK does not support the particular activation function for the function */ + nnp_status_unsupported_activation = 28, + /** NNPACK does not support the particular activation function parameters for the function */ + nnp_status_unsupported_activation_parameters = 29, + + /** NNPACK function was called before the library was initialized */ + nnp_status_uninitialized = 50, + /** NNPACK does not implement this function for the host CPU */ + nnp_status_unsupported_hardware = 51, + /** 
NNPACK failed to allocate memory for temporary buffers */ + nnp_status_out_of_memory = 52, + /** Scratch space buffer is too small */ + nnp_status_insufficient_buffer = 53, + /** Scratch space buffer is not properly aligned */ + nnp_status_misaligned_buffer = 54 +}; + +/** + * @brief Activation applied applied after a convolutional or fully-connected layer. + */ +enum nnp_activation { + /** Identity activation f(x) := x, i.e. no transformation */ + nnp_activation_identity = 0, + /** ReLU activation f(x) := max(0, x) */ + nnp_activation_relu = 1, +}; + +/** + * @brief Algorithm for computing convolutional layers. + */ +enum nnp_convolution_algorithm { + /** Let NNPACK choose the algorithm depending on layer parameters */ + nnp_convolution_algorithm_auto = 0, + /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */ + nnp_convolution_algorithm_ft8x8 = 1, + /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */ + nnp_convolution_algorithm_ft16x16 = 2, + /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */ + nnp_convolution_algorithm_wt8x8 = 3, + /** Direct convolution via implicit GEMM. */ + nnp_convolution_algorithm_implicit_gemm = 4, + /** Direct convolution implementation. */ + nnp_convolution_algorithm_direct = 5, + /** + * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16. + * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP), + * on non-supported processors falls back to nnp_convolution_algorithm_wt8x8. + */ + nnp_convolution_algorithm_wt8x8_fp16 = 6, +}; + +enum nnp_convolution_transform_strategy { + nnp_convolution_transform_strategy_compute = 1, + nnp_convolution_transform_strategy_precompute = 2, + nnp_convolution_transform_strategy_reuse = 3 +}; + +/* For backward compatibility */ +#define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute +#define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute + +/** + * @brief Size of images, kernels, and pooling filters in NNPACK. + */ +struct nnp_size { + /** Width (horizontal size) of an image, kernel, or pooling filter. */ + size_t width; + /** Height (vertical size) of an image, kernel, or pooling filter. */ + size_t height; +}; + +/** + * @brief Padding of images in NNPACK. + */ +struct nnp_padding { + /** Padding above the image data */ + size_t top; + /** Padding on the right of image data */ + size_t right; + /** Padding below the image data */ + size_t bottom; + /** Padding on the left of image data */ + size_t left; +}; + +/** + * @brief Profiling information about time spent in different phases of a function call. + */ +struct nnp_profile { + /** Time spent inside the function call, in seconds. */ + double total; + /** Time spend on transformation of the input or input gradient tensor, in seconds. */ + double input_transform; + /** Time spend on transformation of the kernel or kernel gradient tensor, in seconds. */ + double kernel_transform; + /** Time spend on transformation of the output or output gradient tensor, in seconds. */ + double output_transform; + /** Time spend on multiplication-accumulation of transformed coefficients, in seconds. 
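A minimal sketch of the initialization and status-checking pattern implied by this enum, using nnp_initialize()/nnp_deinitialize() declared just below. The unsupported-hardware branch assumes nnp_initialize() reports nnp_status_unsupported_hardware on CPUs NNPACK does not implement, matching the comment on that status code.

#include <stdio.h>
#include <nnpack.h>

int main(void) {
  const enum nnp_status status = nnp_initialize();
  if (status == nnp_status_unsupported_hardware) {
    fprintf(stderr, "NNPACK has no implementation for this CPU\n");
    return 1;
  }
  if (status != nnp_status_success) {
    fprintf(stderr, "nnp_initialize failed with status %d\n", (int) status);
    return 1;
  }
  /* ... call NNPACK layer functions here ... */
  nnp_deinitialize();
  return 0;
}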
*/ + double block_multiplication; +}; + +enum nnp_status nnp_initialize(void); + +enum nnp_status nnp_deinitialize(void); + +/** + * @brief Computes output of a 2D convolutional layer from input and kernel tensors. + * @details This function targets training of convolutional neural networks and performs forward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_convolution_inference for optimal performance. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images. + * @param input_size Size of input images, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. + * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ + +enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. 
Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients). + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* grad_output, + const float* kernel, + float* grad_input, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. 
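The output-size formula repeated in the comments above, collected into a small helper. This is only a restatement of the documented arithmetic; the helper name is illustrative and not part of the NNPACK API.

#include <nnpack.h>

/* output = (pad_before + input + pad_after) - (kernel - 1), per the comments above. */
static struct nnp_size nnp_doc_conv_output_size(
    struct nnp_size input_size,
    struct nnp_padding input_padding,
    struct nnp_size kernel_size) {
  struct nnp_size output_size;
  output_size.height = (input_padding.top + input_size.height + input_padding.bottom)
                     - (kernel_size.height - 1);
  output_size.width = (input_padding.left + input_size.width + input_padding.right)
                    - (kernel_size.width - 1);
  return output_size;
}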
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. + * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[out] grad_kernel A 4D tensor + * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* grad_output, + float* grad_kernel, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param transform_strategy A strategy that guides computation of kernel transforms coefficients. + * Possible values are: + * + * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed + * coefficients. + * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed + * coefficients. + * + * @param input_channels The number of channels (AKA features, dimensions) in the input image. + * @param output_channels The number of channels (AKA features, dimensions) in the output image. + * @param input_size Size of input image, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input image. + * @param kernel_size Kernel size. + * @param output_subsampling Subsample region for output, also known as convolution stride. + * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width]. 
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. + * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes. + * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size + * of required workspace memory at the workspace_size location, and exit without + * computations. + * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory + * before and deallocate after this computation, potentially at significant runtime cost. + * @param[in,out] workspace_size Pointer to the size of workspace buffer. + * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to + * the location specified by this pointer. + * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of + * the buffer, in bytes. + * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK + * would allocate memory before and deallocate after this computation, potentially at + * significant runtime cost. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a fully connected layer from input and kernel matrices. + * @details This function targets training of convolutional neural networks and performs forward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_fully_connected_inference for optimal performance. + * @param batch_size The number of vectors on the input and output of the fully connected layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input matrix. + * @param output_channels The number of channels (AKA features, dimensions) in the output matrix. + * @param[in] input A 2D matrix input[batch_size][input_channels]. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels]. + * @param[out] output A 2D matrix output[batch_size][output_channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. 
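A sketch of the two-call workspace protocol documented above for nnp_convolution_inference: the first call passes a NULL workspace_buffer with a non-NULL workspace_size so NNPACK only reports the required scratch size; the second call supplies a 64-byte-aligned buffer of that size. The channel counts and tensor shapes are hypothetical, nnp_initialize() is assumed to have succeeded, and posix_memalign() is used only as one way to obtain the required alignment.

#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <nnpack.h>

enum nnp_status conv3x3_inference(const float* input, const float* kernel,
                                  const float* bias, float* output) {
  /* Hypothetical layer shape: 16 -> 32 channels, 32x32 input, 3x3 kernel, stride 1. */
  const struct nnp_size input_size = { .width = 32, .height = 32 };
  const struct nnp_padding input_padding = { .top = 1, .right = 1, .bottom = 1, .left = 1 };
  const struct nnp_size kernel_size = { .width = 3, .height = 3 };
  const struct nnp_size output_subsampling = { .width = 1, .height = 1 };

  /* Call 1: workspace_buffer == NULL, workspace_size != NULL -> only report the size. */
  size_t workspace_size = 0;
  enum nnp_status status = nnp_convolution_inference(
      nnp_convolution_algorithm_auto, nnp_convolution_transform_strategy_compute,
      16, 32, input_size, input_padding, kernel_size, output_subsampling,
      input, kernel, bias, output, NULL, &workspace_size,
      nnp_activation_identity, NULL, /*threadpool=*/NULL, /*profile=*/NULL);
  if (status != nnp_status_success) {
    return status;
  }

  /* Call 2: provide a 64-byte-aligned scratch buffer of the reported size. */
  void* workspace_buffer = NULL;
  if (posix_memalign(&workspace_buffer, 64, workspace_size) != 0) {
    return nnp_status_out_of_memory;
  }
  status = nnp_convolution_inference(
      nnp_convolution_algorithm_auto, nnp_convolution_transform_strategy_compute,
      16, 32, input_size, input_padding, kernel_size, output_subsampling,
      input, kernel, bias, output, workspace_buffer, &workspace_size,
      nnp_activation_identity, NULL, /*threadpool=*/NULL, /*profile=*/NULL);
  free(workspace_buffer);
  return status;
}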
+ */ +enum nnp_status nnp_fully_connected_output( + size_t batch_size, + size_t input_channels, + size_t output_channels, + const float input[], + const float kernel[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param input_channels The number of channels (AKA features, dimensions) in the input vector. + * @param output_channels The number of channels (AKA features, dimensions) in the output vector. + * @param[in] input A 1D array input[input_channels] of FP32 elements. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements. + * @param[out] output A 1D array output[output_channels] of FP32 elements. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_fully_connected_inference( + size_t input_channels, + size_t output_channels, + const float* input, + const float* kernel, + float* output, + pthreadpool_t threadpool); + +/** + * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param input_channels The number of channels (AKA features, dimensions) in the input vector. + * @param output_channels The number of channels (AKA features, dimensions) in the output vector. + * @param[in] input A 1D array input[input_channels] of FP32 elements. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements. + * @param[out] output A 1D array output[output_channels] of FP32 elements. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_fully_connected_inference_f16f32( + size_t input_channels, + size_t output_channels, + const float* input, + const void* kernel, + float* output, + pthreadpool_t threadpool); + +/** + * @brief Computes output of a max-pooling layer for an input tensor. + * @details This function targets both prediction and training of convolutional neural networks and performs forward + * propagation. Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of images on the input and output of the max-pooling layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output images. + * @param input_size Size of input images, excluding implicit zero-padding. + * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but + * affect the output size. + * @param pooling_size Size of the pooling filter. Only 2x2 filter are currently supported. + * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported. + * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width]. 
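For a single input vector, the fully connected comments above point to the inference entry point rather than the batched one. A minimal wrapper sketch; the wrapper name is illustrative, and nnp_initialize() is assumed to have succeeded.

#include <nnpack.h>

/* y = W * x with x[input_channels], W[output_channels][input_channels], y[output_channels]. */
enum nnp_status dense_forward_single(const float* x, const float* W, float* y,
                                     size_t input_channels, size_t output_channels) {
  return nnp_fully_connected_inference(input_channels, output_channels,
                                       x, W, y, /*threadpool=*/NULL);
}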
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where + * output_size.height = ceil( + * (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) / + * pooling_stride.height) + 1 + * output_size.width = ceil( + * (input_padding.left + input_size.width + input_padding.right - pooling_size.width) / + * pooling_stride.width) + 1 + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_max_pooling_output( + size_t batch_size, + size_t channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size pooling_size, + struct nnp_size pooling_stride, + const float input[], + float output[], + pthreadpool_t threadpool); + +/** + * @brief Computes output of a softmax layer for an input matrix. + * @details This function targets both prediction and training of convolutional neural networks and performs forward + * propagation. Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of vectors on the input and output of the softmax layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output vectors. + * @param[in] input A 2D matrix input[batch_size][channels]. + * @param[out] output A 2D matrix output[batch_size][channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_softmax_output( + size_t batch_size, + size_t channels, + const float input[], + float output[], + pthreadpool_t threadpool); + +/** + * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix. + * @details This function targets both prediction and training of convolutional neural networks and performs forward + * propagation. Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of vectors on the input and output of the ReLU layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output matrices. + * @param[in] input A 2D matrix input[batch_size][channels]. + * @param[out] output A 2D matrix output[batch_size][channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + */ +enum nnp_status nnp_relu_output( + size_t batch_size, + size_t channels, + const float input[], + float output[], + float negative_slope, + pthreadpool_t threadpool); + +/** + * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * Is is optimized for both large and small minibatch sizes. + * @param batch_size The number of vectors on the input and output of the ReLU layer. + * @param channels The number of channels (AKA features, dimensions) in both input and output matrices. + * @param[in] input A 2D matrix input[batch_size][channels]. + * @param[out] output A 2D matrix output[batch_size][channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. 
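The pooled output size used by nnp_max_pooling_output above follows the documented ceil-based formula; here it is restated as a small helper. The helper names are illustrative only and not part of NNPACK.

#include <nnpack.h>

/* out = ceil((pad_before + input + pad_after - pooling) / stride) + 1, per the comment above. */
static size_t pooled_extent(size_t padded_input, size_t pooling, size_t stride) {
  return (padded_input - pooling + stride - 1) / stride + 1;
}

static struct nnp_size nnp_doc_pooling_output_size(
    struct nnp_size input_size, struct nnp_padding input_padding,
    struct nnp_size pooling_size, struct nnp_size pooling_stride) {
  struct nnp_size output_size;
  output_size.height = pooled_extent(
      input_padding.top + input_size.height + input_padding.bottom,
      pooling_size.height, pooling_stride.height);
  output_size.width = pooled_extent(
      input_padding.left + input_size.width + input_padding.right,
      pooling_size.width, pooling_stride.width);
  return output_size;
}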
+ */ +enum nnp_status nnp_relu_input_gradient( + size_t batch_size, + size_t channels, + const float grad_output[], + const float input[], + float grad_input[], + float negative_slope, + pthreadpool_t threadpool); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#ifdef __cplusplus +// Backward compatible implementations for nnp_convolution_*, if we are in C++ +// mode. +inline enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_output( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, kernel, bias, output, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float grad_output[], + const float kernel[], + float grad_input[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_input_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + grad_output, kernel, grad_input, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float grad_output[], + float grad_kernel[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_kernel_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, grad_output, grad_kernel, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) { + return nnp_convolution_inference( + algorithm, transform_strategy, + input_channels, output_channels, + input_size, input_padding, kernel_size, output_subsampling, + input, kernel, bias, output, NULL, NULL, + nnp_activation_identity, NULL, + threadpool, profile); +} + +#endif // __cplusplus diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/psimd.h b/llmeval-env/lib/python3.10/site-packages/torch/include/psimd.h new file mode 100644 index 0000000000000000000000000000000000000000..b7cb65d799c98931a73b3184511b1bd8c2b30ec0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/psimd.h @@ -0,0 +1,1384 @@ +#pragma once +#ifndef PSIMD_H +#define PSIMD_H + +#if 
defined(__CUDA_ARCH__) + /* CUDA compiler */ + #define PSIMD_INTRINSIC __forceinline__ __device__ +#elif defined(__OPENCL_VERSION__) + /* OpenCL compiler */ + #define PSIMD_INTRINSIC inline static +#elif defined(__INTEL_COMPILER) + /* Intel compiler, even on Windows */ + #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__)) +#elif defined(__GNUC__) + /* GCC-compatible compiler (gcc/clang/icc) */ + #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__)) +#elif defined(_MSC_VER) + /* MSVC-compatible compiler (cl/icl/clang-cl) */ + #define PSIMD_INTRINSIC __forceinline static +#elif defined(__cplusplus) + /* Generic C++ compiler */ + #define PSIMD_INTRINSIC inline static +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + /* Generic C99 compiler */ + #define PSIMD_INTRINSIC inline static +#else + /* Generic C compiler */ + #define PSIMD_INTRINSIC static +#endif + +#if defined(__GNUC__) || defined(__clang__) + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + #include + #endif + + #if defined(__SSE2__) + #include + #endif + + #if defined(__SSE3__) + #include + #endif + + #if defined(__SSSE3__) + #include + #endif + + #if defined(__SSE4_1__) + #include + #endif + + #if defined(__SSE4_2__) + #include + #endif + + #if defined(__AVX__) + #include + #endif +#elif defined(_MSC_VER) + #include +#endif + +#if defined(__cplusplus) + #define PSIMD_CXX_SYNTAX +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) + #define PSIMD_C11_SYNTAX +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + #define PSIMD_C99_SYNTAX +#else + #define PSIMD_C89_SYNTAX +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define PSIMD_HAVE_F64 0 + #define PSIMD_HAVE_F32 1 + #define PSIMD_HAVE_U8 1 + #define PSIMD_HAVE_S8 1 + #define PSIMD_HAVE_U16 1 + #define PSIMD_HAVE_S16 1 + #define PSIMD_HAVE_U32 1 + #define PSIMD_HAVE_S32 1 + #define PSIMD_HAVE_U64 0 + #define PSIMD_HAVE_S64 0 + + typedef int8_t psimd_s8 __attribute__((vector_size(16), aligned(1))); + typedef uint8_t psimd_u8 __attribute__((vector_size(16), aligned(1))); + typedef int16_t psimd_s16 __attribute__((vector_size(16), aligned(2))); + typedef uint16_t psimd_u16 __attribute__((vector_size(16), aligned(2))); + typedef int32_t psimd_s32 __attribute__((vector_size(16), aligned(4))); + typedef uint32_t psimd_u32 __attribute__((vector_size(16), aligned(4))); + typedef float psimd_f32 __attribute__((vector_size(16), aligned(4))); + + typedef struct { + psimd_s8 lo; + psimd_s8 hi; + } psimd_s8x2; + + typedef struct { + psimd_u8 lo; + psimd_u8 hi; + } psimd_u8x2; + + typedef struct { + psimd_s16 lo; + psimd_s16 hi; + } psimd_s16x2; + + typedef struct { + psimd_u16 lo; + psimd_u16 hi; + } psimd_u16x2; + + typedef struct { + psimd_s32 lo; + psimd_s32 hi; + } psimd_s32x2; + + typedef struct { + psimd_u32 lo; + psimd_u32 hi; + } psimd_u32x2; + + typedef struct { + psimd_f32 lo; + psimd_f32 hi; + } psimd_f32x2; + + /* Bit casts */ + PSIMD_INTRINSIC psimd_u32x2 psimd_cast_s32x2_u32x2(psimd_s32x2 v) { + return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi }; + } + + PSIMD_INTRINSIC psimd_f32x2 psimd_cast_s32x2_f32x2(psimd_s32x2 v) { + return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi }; + } + + PSIMD_INTRINSIC psimd_s32x2 psimd_cast_u32x2_s32x2(psimd_u32x2 v) { + return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = 
(psimd_s32) v.hi }; + } + + PSIMD_INTRINSIC psimd_f32x2 psimd_cast_u32x2_f32x2(psimd_u32x2 v) { + return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi }; + } + + PSIMD_INTRINSIC psimd_s32x2 psimd_cast_f32x2_s32x2(psimd_f32x2 v) { + return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi }; + } + + PSIMD_INTRINSIC psimd_u32x2 psimd_cast_f32x2_u32x2(psimd_f32x2 v) { + return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi }; + } + + /* Swap */ + PSIMD_INTRINSIC void psimd_swap_s8(psimd_s8 a[1], psimd_s8 b[1]) { + const psimd_s8 new_a = *b; + const psimd_s8 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u8(psimd_u8 a[1], psimd_u8 b[1]) { + const psimd_u8 new_a = *b; + const psimd_u8 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_s16(psimd_s16 a[1], psimd_s16 b[1]) { + const psimd_s16 new_a = *b; + const psimd_s16 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u16(psimd_u16 a[1], psimd_u16 b[1]) { + const psimd_u16 new_a = *b; + const psimd_u16 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_s32(psimd_s32 a[1], psimd_s32 b[1]) { + const psimd_s32 new_a = *b; + const psimd_s32 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_u32(psimd_u32 a[1], psimd_u32 b[1]) { + const psimd_u32 new_a = *b; + const psimd_u32 new_b = *a; + *a = new_a; + *b = new_b; + } + + PSIMD_INTRINSIC void psimd_swap_f32(psimd_f32 a[1], psimd_f32 b[1]) { + const psimd_f32 new_a = *b; + const psimd_f32 new_b = *a; + *a = new_a; + *b = new_b; + } + + /* Zero-initialization */ + PSIMD_INTRINSIC psimd_s8 psimd_zero_s8(void) { + return (psimd_s8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u8 psimd_zero_u8(void) { + return (psimd_u8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_s16 psimd_zero_s16(void) { + return (psimd_s16) { 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u16 psimd_zero_u16(void) { + return (psimd_u16) { 0, 0, 0, 0, 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_zero_s32(void) { + return (psimd_s32) { 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_zero_u32(void) { + return (psimd_u32) { 0, 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_zero_f32(void) { + return (psimd_f32) { 0.0f, 0.0f, 0.0f, 0.0f }; + } + + /* Initialization to the same constant */ + PSIMD_INTRINSIC psimd_s8 psimd_splat_s8(int8_t c) { + return (psimd_s8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u8 psimd_splat_u8(uint8_t c) { + return (psimd_u8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_s16 psimd_splat_s16(int16_t c) { + return (psimd_s16) { c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u16 psimd_splat_u16(uint16_t c) { + return (psimd_u16) { c, c, c, c, c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_splat_s32(int32_t c) { + return (psimd_s32) { c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_splat_u32(uint32_t c) { + return (psimd_u32) { c, c, c, c }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat_f32(float c) { + return (psimd_f32) { c, c, c, c }; + } + + /* Load vector */ + PSIMD_INTRINSIC psimd_s8 psimd_load_s8(const void* address) { + return *((const psimd_s8*) address); + } + + PSIMD_INTRINSIC psimd_u8 psimd_load_u8(const void* address) { + return *((const psimd_u8*) address); + } + + PSIMD_INTRINSIC psimd_s16 psimd_load_s16(const 
void* address) { + return *((const psimd_s16*) address); + } + + PSIMD_INTRINSIC psimd_u16 psimd_load_u16(const void* address) { + return *((const psimd_u16*) address); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load_s32(const void* address) { + return *((const psimd_s32*) address); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load_u32(const void* address) { + return *((const psimd_u32*) address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_f32(const void* address) { + return *((const psimd_f32*) address); + } + + PSIMD_INTRINSIC psimd_s8 psimd_load_splat_s8(const void* address) { + return psimd_splat_s8(*((const int8_t*) address)); + } + + PSIMD_INTRINSIC psimd_u8 psimd_load_splat_u8(const void* address) { + return psimd_splat_u8(*((const uint8_t*) address)); + } + + PSIMD_INTRINSIC psimd_s16 psimd_load_splat_s16(const void* address) { + return psimd_splat_s16(*((const int16_t*) address)); + } + + PSIMD_INTRINSIC psimd_u16 psimd_load_splat_u16(const void* address) { + return psimd_splat_u16(*((const uint16_t*) address)); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load_splat_s32(const void* address) { + return psimd_splat_s32(*((const int32_t*) address)); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load_splat_u32(const void* address) { + return psimd_splat_u32(*((const uint32_t*) address)); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_splat_f32(const void* address) { + return psimd_splat_f32(*((const float*) address)); + } + + PSIMD_INTRINSIC psimd_s32 psimd_load1_s32(const void* address) { + return (psimd_s32) { *((const int32_t*) address), 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load1_u32(const void* address) { + return (psimd_u32) { *((const uint32_t*) address), 0, 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_f32(const void* address) { + return (psimd_f32) { *((const float*) address), 0.0f, 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load2_s32(const void* address) { + const int32_t* address_s32 = (const int32_t*) address; + return (psimd_s32) { address_s32[0], address_s32[1], 0, 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load2_u32(const void* address) { + const uint32_t* address_u32 = (const uint32_t*) address; + return (psimd_u32) { address_u32[0], address_u32[1], 0, 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[1], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load3_s32(const void* address) { + const int32_t* address_s32 = (const int32_t*) address; + return (psimd_s32) { address_s32[0], address_s32[1], address_s32[2], 0 }; + } + + PSIMD_INTRINSIC psimd_u32 psimd_load3_u32(const void* address) { + const uint32_t* address_u32 = (const uint32_t*) address; + return (psimd_u32) { address_u32[0], address_u32[1], address_u32[2], 0 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[1], address_f32[2], 0.0f }; + } + + PSIMD_INTRINSIC psimd_s32 psimd_load4_s32(const void* address) { + return psimd_load_s32(address); + } + + PSIMD_INTRINSIC psimd_u32 psimd_load4_u32(const void* address) { + return psimd_load_u32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_f32(const void* address) { + return psimd_load_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_stride2_f32(const void* address) { + const psimd_f32 v0x1x = psimd_load_f32(address); + const psimd_f32 vx2x3 = psimd_load_f32((const float*) 
address + 3); + #if defined(__clang__) + return __builtin_shufflevector(v0x1x, vx2x3, 0, 2, 5, 7); + #else + return __builtin_shuffle(v0x1x, vx2x3, (psimd_s32) { 0, 2, 5, 7 }); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_stride2_f32(const void* address) { + return psimd_load_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_stride2_f32(const void* address) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[2], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_stride2_f32(const void* address) { + const psimd_f32 v0x1x = psimd_load_f32(address); + const psimd_f32 v2zzz = psimd_load1_f32((const float*) address + 2); + #if defined(__clang__) + return __builtin_shufflevector(v0x1x, v2zzz, 0, 2, 4, 6); + #else + return __builtin_shuffle(v0x1x, v2zzz, (psimd_s32) { 0, 2, 4, 6 }); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_stride2_f32(const void* address) { + return psimd_load_stride2_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load_stride_f32(const void* address, size_t stride) { + const float* address0_f32 = (const float*) address; + const float* address1_f32 = address0_f32 + stride; + const float* address2_f32 = address1_f32 + stride; + const float* address3_f32 = address2_f32 + stride; + return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, *address3_f32 }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load1_stride_f32(const void* address, size_t stride) { + return psimd_load1_f32(address); + } + + PSIMD_INTRINSIC psimd_f32 psimd_load2_stride_f32(const void* address, size_t stride) { + const float* address_f32 = (const float*) address; + return (psimd_f32) { address_f32[0], address_f32[stride], 0.0f, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load3_stride_f32(const void* address, size_t stride) { + const float* address0_f32 = (const float*) address; + const float* address1_f32 = address0_f32 + stride; + const float* address2_f32 = address1_f32 + stride; + return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, 0.0f }; + } + + PSIMD_INTRINSIC psimd_f32 psimd_load4_stride_f32(const void* address, size_t stride) { + return psimd_load_stride_f32(address, stride); + } + + /* Store vector */ + PSIMD_INTRINSIC void psimd_store_s8(void* address, psimd_s8 value) { + *((psimd_s8*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u8(void* address, psimd_u8 value) { + *((psimd_u8*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_s16(void* address, psimd_s16 value) { + *((psimd_s16*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u16(void* address, psimd_u16 value) { + *((psimd_u16*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_s32(void* address, psimd_s32 value) { + *((psimd_s32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_u32(void* address, psimd_u32 value) { + *((psimd_u32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store_f32(void* address, psimd_f32 value) { + *((psimd_f32*) address) = value; + } + + PSIMD_INTRINSIC void psimd_store1_s32(void* address, psimd_s32 value) { + *((int32_t*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store1_u32(void* address, psimd_u32 value) { + *((uint32_t*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store1_f32(void* address, psimd_f32 value) { + *((float*) address) = value[0]; + } + + PSIMD_INTRINSIC void psimd_store2_s32(void* address, psimd_s32 value) { + int32_t* address_s32 = (int32_t*) address; + address_s32[0] = value[0]; + address_s32[1] = 
value[1]; + } + + PSIMD_INTRINSIC void psimd_store2_u32(void* address, psimd_u32 value) { + uint32_t* address_u32 = (uint32_t*) address; + address_u32[0] = value[0]; + address_u32[1] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store2_f32(void* address, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[1] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store3_s32(void* address, psimd_s32 value) { + int32_t* address_s32 = (int32_t*) address; + address_s32[0] = value[0]; + address_s32[1] = value[1]; + address_s32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store3_u32(void* address, psimd_u32 value) { + uint32_t* address_u32 = (uint32_t*) address; + address_u32[0] = value[0]; + address_u32[1] = value[1]; + address_u32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store3_f32(void* address, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[1] = value[1]; + address_f32[2] = value[2]; + } + + PSIMD_INTRINSIC void psimd_store4_s32(void* address, psimd_s32 value) { + psimd_store_s32(address, value); + } + + PSIMD_INTRINSIC void psimd_store4_u32(void* address, psimd_u32 value) { + psimd_store_u32(address, value); + } + + PSIMD_INTRINSIC void psimd_store4_f32(void* address, psimd_f32 value) { + psimd_store_f32(address, value); + } + + PSIMD_INTRINSIC void psimd_store_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address0_f32 = (float*) address; + float* address1_f32 = address0_f32 + stride; + float* address2_f32 = address1_f32 + stride; + float* address3_f32 = address2_f32 + stride; + *address0_f32 = value[0]; + *address1_f32 = value[1]; + *address2_f32 = value[2]; + *address3_f32 = value[3]; + } + + PSIMD_INTRINSIC void psimd_store1_stride_f32(void* address, size_t stride, psimd_f32 value) { + psimd_store1_f32(address, value); + } + + PSIMD_INTRINSIC void psimd_store2_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address_f32 = (float*) address; + address_f32[0] = value[0]; + address_f32[stride] = value[1]; + } + + PSIMD_INTRINSIC void psimd_store3_stride_f32(void* address, size_t stride, psimd_f32 value) { + float* address0_f32 = (float*) address; + float* address1_f32 = address0_f32 + stride; + float* address2_f32 = address1_f32 + stride; + *address0_f32 = value[0]; + *address1_f32 = value[1]; + *address2_f32 = value[2]; + } + + /* Vector addition */ + PSIMD_INTRINSIC psimd_s8 psimd_add_s8(psimd_s8 a, psimd_s8 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_add_u8(psimd_u8 a, psimd_u8 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_add_s16(psimd_s16 a, psimd_s16 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u16 psimd_add_u16(psimd_u16 a, psimd_u16 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_add_s32(psimd_s32 a, psimd_s32 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_add_u32(psimd_u32 a, psimd_u32 b) { + return a + b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_add_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vaddq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a + b; + #endif + } + + /* Vector subtraction */ + PSIMD_INTRINSIC psimd_s8 psimd_sub_s8(psimd_s8 a, psimd_s8 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_sub_u8(psimd_u8 a, psimd_u8 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_sub_s16(psimd_s16 a, psimd_s16 b) { + return a - b; + } + + 
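/* Usage sketch (editorial illustration, not upstream psimd code): the load, + * arithmetic, and store intrinsics above compose like ordinary C functions. + * Assuming a, b, and sum are hypothetical pointers to at least four + * readable/writable floats: + * + *   const psimd_f32 va = psimd_load_f32(a); + *   const psimd_f32 vb = psimd_load_f32(b); + *   psimd_store_f32(sum, psimd_add_f32(va, vb)); + */ + +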
PSIMD_INTRINSIC psimd_u16 psimd_sub_u16(psimd_u16 a, psimd_u16 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_sub_s32(psimd_s32 a, psimd_s32 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_sub_u32(psimd_u32 a, psimd_u32 b) { + return a - b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_sub_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vsubq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a - b; + #endif + } + + /* Vector multiplication */ + PSIMD_INTRINSIC psimd_s8 psimd_mul_s8(psimd_s8 a, psimd_s8 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u8 psimd_mul_u8(psimd_u8 a, psimd_u8 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_s16 psimd_mul_s16(psimd_s16 a, psimd_s16 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u16 psimd_mul_u16(psimd_u16 a, psimd_u16 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_s32 psimd_mul_s32(psimd_s32 a, psimd_s32 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_u32 psimd_mul_u32(psimd_u32 a, psimd_u32 b) { + return a * b; + } + + PSIMD_INTRINSIC psimd_f32 psimd_mul_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__) + return (psimd_f32) vmulq_f32((float32x4_t) a, (float32x4_t) b); + #else + return a * b; + #endif + } + + /* Quasi-Fused Multiply-Add */ + PSIMD_INTRINSIC psimd_f32 psimd_qfma_f32(psimd_f32 a, psimd_f32 b, psimd_f32 c) { + #if defined(__aarch64__) || defined(__ARM_NEON__) && defined(__ARM_FEATURE_FMA) + return (psimd_f32) vfmaq_f32((float32x4_t) a, (float32x4_t) b, (float32x4_t) c); + #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA__) + return (psimd_f32) _mm_fmadd_ps((__m128) b, (__m128) c, (__m128) a); + #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA4__) + return (psimd_f32) _mm_macc_ps((__m128) b, (__m128) c, (__m128) a); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) && PSIMD_ENABLE_WASM_QFMA + return (psimd_f32) __builtin_wasm_qfma_f32x4(a, b, c); + #else + return a + b * c; + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_div_f32(psimd_f32 a, psimd_f32 b) { + return a / b; + } + + /* Vector and */ + PSIMD_INTRINSIC psimd_f32 psimd_andmask_f32(psimd_s32 mask, psimd_f32 v) { + return (psimd_f32) (mask & (psimd_s32) v); + } + + /* Vector and-not */ + PSIMD_INTRINSIC psimd_f32 psimd_andnotmask_f32(psimd_s32 mask, psimd_f32 v) { + return (psimd_f32) (~mask & (psimd_s32) v); + } + + /* Vector blend */ + PSIMD_INTRINSIC psimd_s8 psimd_blend_s8(psimd_s8 mask, psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vbslq_s8((uint8x16_t) mask, (int8x16_t) a, (int8x16_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s8) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_blend_u8(psimd_s8 mask, psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vbslq_u8((uint8x16_t) mask, (uint8x16_t) a, (uint8x16_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u8) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u8) ((mask & (psimd_s8) a) | (~mask & (psimd_s8) b)); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_blend_s16(psimd_s16 mask, psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) 
|| defined(__ARM_NEON) + return (psimd_s16) vbslq_s16((uint16x8_t) mask, (int16x8_t) a, (int16x8_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s16) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_blend_u16(psimd_s16 mask, psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vbslq_u16((uint16x8_t) mask, (uint16x8_t) a, (uint16x8_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u16) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u16) ((mask & (psimd_s16) a) | (~mask & (psimd_s16) b)); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_blend_s32(psimd_s32 mask, psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vbslq_s32((uint32x4_t) mask, (int32x4_t) a, (int32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_s32) __builtin_wasm_bitselect(a, b, mask); + #else + return (mask & a) | (~mask & b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_blend_u32(psimd_s32 mask, psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vbslq_u32((uint32x4_t) mask, (uint32x4_t) a, (uint32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_u32) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_u32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b)); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_blend_f32(psimd_s32 mask, psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vbslq_f32((uint32x4_t) mask, (float32x4_t) a, (float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return (psimd_f32) __builtin_wasm_bitselect(a, b, mask); + #else + return (psimd_f32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b)); + #endif + } + + /* Vector blend on sign */ + PSIMD_INTRINSIC psimd_s8 psimd_signblend_s8(psimd_s8 x, psimd_s8 a, psimd_s8 b) { + return psimd_blend_s8(x >> psimd_splat_s8(7), a, b); + } + + PSIMD_INTRINSIC psimd_u8 psimd_signblend_u8(psimd_s8 x, psimd_u8 a, psimd_u8 b) { + return psimd_blend_u8((x >> psimd_splat_s8(7)), a, b); + } + + PSIMD_INTRINSIC psimd_s16 psimd_signblend_s16(psimd_s16 x, psimd_s16 a, psimd_s16 b) { + return psimd_blend_s16(x >> psimd_splat_s16(15), a, b); + } + + PSIMD_INTRINSIC psimd_u16 psimd_signblend_u16(psimd_s16 x, psimd_u16 a, psimd_u16 b) { + return psimd_blend_u16((x >> psimd_splat_s16(15)), a, b); + } + + PSIMD_INTRINSIC psimd_s32 psimd_signblend_s32(psimd_s32 x, psimd_s32 a, psimd_s32 b) { + return psimd_blend_s32(x >> psimd_splat_s32(31), a, b); + } + + PSIMD_INTRINSIC psimd_u32 psimd_signblend_u32(psimd_s32 x, psimd_u32 a, psimd_u32 b) { + return psimd_blend_u32((x >> psimd_splat_s32(31)), a, b); + } + + PSIMD_INTRINSIC psimd_f32 psimd_signblend_f32(psimd_f32 x, psimd_f32 a, psimd_f32 b) { + const psimd_s32 mask = (psimd_s32) x >> psimd_splat_s32(31); + return psimd_blend_f32(mask, a, b); + } + + /* Vector absolute value */ + PSIMD_INTRINSIC psimd_f32 psimd_abs_f32(psimd_f32 v) { + const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f); + return (psimd_f32) ((psimd_s32) v & ~mask); + } + + /* Vector negation */ + PSIMD_INTRINSIC psimd_f32 psimd_neg_f32(psimd_f32 v) { + const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f); + 
return (psimd_f32) ((psimd_s32) v ^ mask); + } + + /* Vector maximum */ + PSIMD_INTRINSIC psimd_s8 psimd_max_s8(psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vmaxq_s8((int8x16_t) a, (int8x16_t) b); + #else + return psimd_blend_s8(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_max_u8(psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b); + #else + return psimd_blend_u8(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_max_s16(psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s16) vmaxq_s16((int16x8_t) a, (int16x8_t) b); + #else + return psimd_blend_s16(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_max_u16(psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vmaxq_u16((uint16x8_t) a, (uint16x8_t) b); + #else + return psimd_blend_u16(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_max_s32(psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b); + #else + return psimd_blend_s32(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_max_u32(psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vmaxq_u32((uint32x4_t) a, (uint32x4_t) b); + #else + return psimd_blend_u32(a > b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_max_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vmaxq_f32((float32x4_t) a, (float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return __builtin_wasm_max_f32x4(a, b); + #else + return psimd_blend_f32(a > b, a, b); + #endif + } + + /* Vector minimum */ + PSIMD_INTRINSIC psimd_s8 psimd_min_s8(psimd_s8 a, psimd_s8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s8) vminq_s8((int8x16_t) a, (int8x16_t) b); + #else + return psimd_blend_s8(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u8 psimd_min_u8(psimd_u8 a, psimd_u8 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u8) vminq_u8((uint8x16_t) a, (uint8x16_t) b); + #else + return psimd_blend_u8(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s16 psimd_min_s16(psimd_s16 a, psimd_s16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s16) vminq_s16((int16x8_t) a, (int16x8_t) b); + #else + return psimd_blend_s16(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u16 psimd_min_u16(psimd_u16 a, psimd_u16 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u16) vminq_u16((uint16x8_t) a, (uint16x8_t) b); + #else + return psimd_blend_u16(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_s32 psimd_min_s32(psimd_s32 a, psimd_s32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b); + #else + return psimd_blend_s32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_u32 psimd_min_u32(psimd_u32 a, psimd_u32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_u32) vminq_u32((uint32x4_t) a, (uint32x4_t) b); + #else + return psimd_blend_u32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_min_f32(psimd_f32 a, psimd_f32 b) { + #if defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vminq_f32((float32x4_t) a, 
(float32x4_t) b); + #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) + return __builtin_wasm_min_f32x4(a, b); + #else + return psimd_blend_f32(a < b, a, b); + #endif + } + + PSIMD_INTRINSIC psimd_f32 psimd_cvt_s32_f32(psimd_s32 v) { + #if defined(__clang__) + return __builtin_convertvector(v, psimd_f32); + #elif defined(__ARM_NEON__) || defined(__ARM_NEON) + return (psimd_f32) vcvtq_f32_s32((int32x4_t) v); + #elif defined(__SSE2__) + return (psimd_f32) _mm_cvtepi32_ps((__m128i) v); + #else + return (psimd_f32) { (float) v[0], (float) v[1], (float) v[2], (float) v[3] }; + #endif + } + + /* Broadcast vector element */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 0, 0, 0, 0); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 1, 1, 1, 1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 2, 2, 2, 2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 3, 3, 3, 3); + } + #else + PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 0, 0, 0, 0 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 1, 1, 1, 1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 2, 2, 2, 2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 3, 3, 3 }); + } + #endif + + /* Reversal of vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) { + return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) { + return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) { + return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) { + return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + + PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) { + return __builtin_shufflevector(v, v, 3, 2, 1, 0); + } + #else + PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) { + return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) { + return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) { + return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) { + return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + + PSIMD_INTRINSIC psimd_f32 
psimd_reverse_f32(psimd_f32 v) { + return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 }); + } + #endif + + /* Interleaving of vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + } + + PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 }); + } + #endif + + /* Concatenation of low/high vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 
b) { + return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 }); + } + #endif + + /* Concatenation of even/odd vector elements */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shufflevector(a, b, + 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14); + } + + PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shufflevector(a, b, + 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shufflevector(a, b, + 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) { + return 
__builtin_shufflevector(a, b, + 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3); + } + #else + PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 }); + } + + PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 }); + } + + PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) { + return __builtin_shuffle(a, b, + (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 }); + } + + PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 }); + } + + PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) { + return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + + PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_u32 
psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) { + return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 }); + } + #endif + + /* Vector reduce */ + #if defined(__clang__) + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, 0, 1); + return temp + __builtin_shufflevector(temp, temp, 1, 0, 3, 2); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1)); + return psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2)); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1)); + return psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2)); + } + + PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, -1, -1); + const psimd_f32 result = temp + __builtin_shufflevector(temp, temp, 1, -1, -1, -1); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1)); + const psimd_f32 result = psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1)); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1)); + const psimd_f32 result = psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1)); + return result[0]; + } + #else + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) { + const psimd_f32 temp = v + __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }); + return temp + __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_max_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 })); + return psimd_max_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 })); + } + + PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) { + const psimd_f32 temp = psimd_min_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 })); + return psimd_min_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 })); + } + + PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_sum_f32(v); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_max_f32(v); + return result[0]; + } + + PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) { + const psimd_f32 result = psimd_allreduce_min_f32(v); + return result[0]; + } + #endif +#endif + +#endif /* PSIMD_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/pthreadpool.h b/llmeval-env/lib/python3.10/site-packages/torch/include/pthreadpool.h new file mode 100644 index 0000000000000000000000000000000000000000..953ccc4cc24070aa4897fabc081cba466e34170a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/pthreadpool.h @@ -0,0 +1,2555 @@ +#ifndef PTHREADPOOL_H_ +#define 
PTHREADPOOL_H_ + +#include <stddef.h> +#include <stdint.h> + +typedef struct pthreadpool* pthreadpool_t; + +typedef void (*pthreadpool_task_1d_t)(void*, size_t); +typedef void (*pthreadpool_task_1d_with_thread_t)(void*, size_t, size_t); +typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t); +typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t); +typedef void (*pthreadpool_task_2d_with_thread_t)(void*, size_t, size_t, size_t); +typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t); +typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_with_thread_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_5d_t)(void*, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_5d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_6d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_6d_tile_1d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t); + +typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t); +typedef void (*pthreadpool_task_2d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t); + +typedef void (*pthreadpool_task_2d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_1d_with_id_with_thread_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t); + + +/** + * Disable support for denormalized numbers to the maximum extent possible for + * the duration of the computation. + * + * Handling denormalized floating-point numbers is often implemented in + * microcode, and incurs significant performance degradation. This hint + * instructs the thread pool to disable support for denormalized numbers before + * running the computation by manipulating architecture-specific control + * registers, and restore the initial value of control registers after the + * computation is complete. The thread pool temporarily disables denormalized + * numbers on all threads involved in the computation (i.e. the caller threads, + * and potentially worker threads). + * + * Disabling denormalized numbers may have a small negative effect on results' + * accuracy.
As various architectures differ in capabilities to control + * processing of denormalized numbers, using this flag may also hurt results' + * reproducibility across different instruction set architectures. + */ +#define PTHREADPOOL_FLAG_DISABLE_DENORMALS 0x00000001 + +/** + * Yield worker threads to the system scheduler after the operation is finished. + * + * Force workers to use kernel wait (instead of active spin-wait by default) for + * new commands after this command is processed. This flag affects only the + * immediate next operation on this thread pool. To make the thread pool always + * use kernel wait, pass this flag to all parallelization functions. + */ +#define PTHREADPOOL_FLAG_YIELD_WORKERS 0x00000002 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Create a thread pool with the specified number of threads. + * + * @param threads_count the number of threads in the thread pool. + * A value of 0 has special interpretation: it creates a thread pool with as + * many threads as there are logical processors in the system. + * + * @returns A pointer to an opaque thread pool object if the call is + * successful, or NULL pointer if the call failed. + */ +pthreadpool_t pthreadpool_create(size_t threads_count); + +/** + * Query the number of threads in a thread pool. + * + * @param threadpool the thread pool to query. + * + * @returns The number of threads in the thread pool. + */ +size_t pthreadpool_get_threads_count(pthreadpool_t threadpool); + +/** + * Process items on a 1D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * function(context, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. The + * specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d( + pthreadpool_t threadpool, + pthreadpool_task_1d_t function, + void* context, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * function(context, thread_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. The + * specified function will be called once for each item. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_thread_t function, + void* context, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range; i++) + * function(context, uarch_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range the number of items on the 1D grid to process. + * The specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range, + uint32_t flags); + +/** + * Process items on a 1D grid with specified maximum tile size. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i += tile) + * function(context, i, min(range - i, tile)); + * + * When the call returns, all items have been processed and the thread pool is + * ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, + * the calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range the number of items on the 1D grid to process. + * @param tile the maximum number of items on the 1D grid to process in + * one function call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_1d_tile_1d_t function, + void* context, + size_t range, + size_t tile, + uint32_t flags); + +/** + * Process items on a 2D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * function(context, i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d( + pthreadpool_t threadpool, + pthreadpool_task_2d_t function, + void* context, + size_t range_i, + size_t range_j, + uint32_t flags); + +/** + * Process items on a 2D grid passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * function(context, thread_index, i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_2d_with_thread_t function, + void* context, + size_t range_i, + size_t range_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags); + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function and passing + * along the current thread id. 
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, uarch_index, thread_index, i, j, min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo, cpuinfo initialization failed,
+ * or index returned by cpuinfo_get_current_uarch_index() exceeds the
+ * max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected by
+ * the specified function. If the index returned by
+ * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index
+ * will be used instead. default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_2d_tile_1d_with_uarch_with_thread(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_1d_with_id_with_thread_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_j,
+ uint32_t flags);
+
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 2D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 2D grid.
+ * @param tile_i the maximum number of items along the first dimension of
+ * the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second dimension of
+ * the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_2d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags);
+
+/**
+ * Process items on a 2D grid with the specified maximum tile size along each
+ * grid dimension using a microarchitecture-aware task function.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * uint32_t uarch_index = cpuinfo_initialize() ?
+ * cpuinfo_get_current_uarch_index() : default_uarch_index;
+ * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index;
+ * for (size_t i = 0; i < range_i; i += tile_i)
+ * for (size_t j = 0; j < range_j; j += tile_j)
+ * function(context, uarch_index, i, j,
+ * min(range_i - i, tile_i), min(range_j - j, tile_j));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If
+ * threadpool is NULL, all items are processed serially on the calling
+ * thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified
+ * function.
+ * @param default_uarch_index the microarchitecture index to use when
+ * pthreadpool is configured without cpuinfo,
+ * cpuinfo initialization failed, or index returned
+ * by cpuinfo_get_current_uarch_index() exceeds
+ * the max_uarch_index value.
+ * @param max_uarch_index the maximum microarchitecture index expected
+ * by the specified function. If the index returned
+ * by cpuinfo_get_current_uarch_index() exceeds this
+ * value, default_uarch_index will be used instead.
+ * default_uarch_index can exceed max_uarch_index.
+ * @param range_i the number of items to process along the first
+ * dimension of the 2D grid.
+ * @param range_j the number of items to process along the second
+ * dimension of the 2D grid.
+ * @param tile_i the maximum number of items along the first
+ * dimension of the 2D grid to process in one function call.
+ * @param tile_j the maximum number of items along the second
+ * dimension of the 2D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional
+ * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or
+ * PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_2d_tile_2d_with_uarch(
+ pthreadpool_t threadpool,
+ pthreadpool_task_2d_tile_2d_with_id_t function,
+ void* context,
+ uint32_t default_uarch_index,
+ uint32_t max_uarch_index,
+ size_t range_i,
+ size_t range_j,
+ size_t tile_i,
+ size_t tile_j,
+ uint32_t flags);
+
+/**
+ * Process items on a 3D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * function(context, i, j, k);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
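+ *
+ * For illustration only (my_context and my_kernel_3d are placeholder names,
+ * not part of this API), a typical call might look like:
+ *
+ *   struct my_context { float* data; };
+ *
+ *   static void my_kernel_3d(void* context, size_t i, size_t j, size_t k) {
+ *     struct my_context* ctx = (struct my_context*) context;
+ *     // process element (i, j, k) using ctx->data
+ *   }
+ *
+ *   struct my_context ctx = { data };
+ *   pthreadpool_parallelize_3d(
+ *     threadpool, my_kernel_3d, &ctx,
+ *     range_i, range_j, range_k, 0);
+ *
+ * where threadpool was previously obtained from pthreadpool_create(), or is
+ * NULL to run everything on the calling thread.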
+ * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d( + pthreadpool_t threadpool, + pthreadpool_task_3d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension and passing along the current thread id. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, thread_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. 
+ * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_thread_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension using a microarchitecture-aware task function and passing + * along the current thread id. 
+ * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, thread_index, i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_1d_with_uarch_with_thread( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_1d_with_id_with_thread_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. 
+ * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_j the maximum number of items along the second + * dimension of the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** + * Process items on a 4D grid. 
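+ *
+ * (For illustration only: a compatible task callback, with a placeholder
+ * name, has the shape
+ *
+ *   void my_task_4d(void* context, size_t i, size_t j, size_t k, size_t l);
+ *
+ * matching the pthreadpool_task_4d_t type used in the declaration below.)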
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * function(context, i, j, k, l); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d( + pthreadpool_t threadpool, + pthreadpool_task_4d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, i, j, k, l, min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions. 
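+ *
+ * (Illustrative arithmetic: with range_l = 10 and tile_l = 4, the innermost
+ * loop in the snippet below invokes the function at l = 0, 4 and 8 with tile
+ * sizes 4, 4 and 2; tile_k partitions the third dimension the same way.)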
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, uarch_index, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. 
If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 4D grid. + * @param range_j the number of items to process along the second + * dimension of the 4D grid. + * @param range_k the number of items to process along the third + * dimension of the 4D grid. + * @param range_l the number of items to process along the fourth + * dimension of the 4D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth + * dimension of the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** + * Process items on a 5D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * function(context, i, j, k, l, m); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d( + pthreadpool_t threadpool, + pthreadpool_task_5d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + uint32_t flags); + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last grid dimension. 
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * function(context, i, j, k, l, m, min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_5d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_5d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_m, + uint32_t flags); + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * for (size_t m = 0; m < range_m; m += tile_m) + * function(context, i, j, k, l, m, + * min(range_l - l, tile_l), min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 5D grid to process in one function call. 
+ * @param tile_m the maximum number of items along the fifth dimension of
+ * the 5D grid to process in one function call.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_5d_tile_2d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_5d_tile_2d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t tile_l,
+ size_t tile_m,
+ uint32_t flags);
+
+/**
+ * Process items on a 6D grid.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * for (size_t n = 0; n < range_n; n++)
+ * function(context, i, j, k, l, m, n);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_6d(
+ pthreadpool_t threadpool,
+ pthreadpool_task_6d_t function,
+ void* context,
+ size_t range_i,
+ size_t range_j,
+ size_t range_k,
+ size_t range_l,
+ size_t range_m,
+ size_t range_n,
+ uint32_t flags);
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ * for (size_t i = 0; i < range_i; i++)
+ * for (size_t j = 0; j < range_j; j++)
+ * for (size_t k = 0; k < range_k; k++)
+ * for (size_t l = 0; l < range_l; l++)
+ * for (size_t m = 0; m < range_m; m++)
+ * for (size_t n = 0; n < range_n; n += tile_n)
+ * function(context, i, j, k, l, m, n, min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param function the function to call for each tile.
+ * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_1d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags); + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * for (size_t n = 0; n < range_n; n += tile_n) + * function(context, i, j, k, l, m, n, + * min(range_m - m, tile_m), min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified function. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 6D grid to process in one function call. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one function call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_6d_tile_2d( + pthreadpool_t threadpool, + pthreadpool_task_6d_tile_2d_t function, + void* context, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_m, + size_t tile_n, + uint32_t flags); + +/** + * Terminates threads in the thread pool and releases associated resources. + * + * @warning Accessing the thread pool after a call to this function constitutes + * undefined behaviour and may cause data corruption. + * + * @param[in,out] threadpool The thread pool to destroy. + */ +void pthreadpool_destroy(pthreadpool_t threadpool); + +#ifndef PTHREADPOOL_NO_DEPRECATED_API + +/* Legacy API for compatibility with pre-existing users (e.g. NNPACK) */ +#if defined(__GNUC__) + #define PTHREADPOOL_DEPRECATED __attribute__((__deprecated__)) +#else + #define PTHREADPOOL_DEPRECATED +#endif + +typedef void (*pthreadpool_function_1d_t)(void*, size_t); +typedef void (*pthreadpool_function_1d_tiled_t)(void*, size_t, size_t); +typedef void (*pthreadpool_function_2d_t)(void*, size_t, size_t); +typedef void (*pthreadpool_function_2d_tiled_t)(void*, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_function_3d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_function_4d_tiled_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t); + +void pthreadpool_compute_1d( + pthreadpool_t threadpool, + pthreadpool_function_1d_t function, + void* argument, + size_t range) PTHREADPOOL_DEPRECATED; + +void pthreadpool_compute_1d_tiled( + pthreadpool_t threadpool, + pthreadpool_function_1d_tiled_t function, + void* argument, + size_t range, + size_t tile) PTHREADPOOL_DEPRECATED; + +void pthreadpool_compute_2d( + pthreadpool_t threadpool, + pthreadpool_function_2d_t function, + void* argument, + size_t range_i, + size_t range_j) PTHREADPOOL_DEPRECATED; + +void pthreadpool_compute_2d_tiled( + pthreadpool_t threadpool, + pthreadpool_function_2d_tiled_t function, + void* argument, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j) PTHREADPOOL_DEPRECATED; + +void pthreadpool_compute_3d_tiled( + pthreadpool_t threadpool, + pthreadpool_function_3d_tiled_t function, + void* argument, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_i, + size_t tile_j, + size_t tile_k) PTHREADPOOL_DEPRECATED; + +void pthreadpool_compute_4d_tiled( + pthreadpool_t threadpool, + pthreadpool_function_4d_tiled_t function, + void* argument, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_i, + size_t tile_j, + size_t tile_k, + size_t tile_l) PTHREADPOOL_DEPRECATED; + +#endif /* PTHREADPOOL_NO_DEPRECATED_API */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#ifdef __cplusplus + +namespace libpthreadpool { +namespace detail { +namespace { + +template +void call_wrapper_1d(void* arg, size_t i) { + (*static_cast(arg))(i); +} + +template +void call_wrapper_1d_tile_1d(void* arg, size_t range_i, size_t tile_i) { + (*static_cast(arg))(range_i, tile_i); +} + +template +void call_wrapper_2d(void* functor, size_t i, size_t j) { + (*static_cast(functor))(i, j); +} + +template +void call_wrapper_2d_tile_1d(void* functor, + size_t i, size_t range_j, size_t tile_j) +{ + (*static_cast(functor))(i, range_j, tile_j); +} + +template +void call_wrapper_2d_tile_2d(void* functor, + 
size_t range_i, size_t range_j, + size_t tile_i, size_t tile_j) +{ + (*static_cast(functor))(range_i, range_j, tile_i, tile_j); +} + +template +void call_wrapper_3d(void* functor, size_t i, size_t j, size_t k) { + (*static_cast(functor))(i, j, k); +} + +template +void call_wrapper_3d_tile_1d(void* functor, + size_t i, size_t j, size_t range_k, + size_t tile_k) +{ + (*static_cast(functor))(i, j, range_k, tile_k); +} + +template +void call_wrapper_3d_tile_2d(void* functor, + size_t i, size_t range_j, size_t range_k, + size_t tile_j, size_t tile_k) +{ + (*static_cast(functor))(i, range_j, range_k, tile_j, tile_k); +} + +template +void call_wrapper_4d(void* functor, size_t i, size_t j, size_t k, size_t l) { + (*static_cast(functor))(i, j, k, l); +} + +template +void call_wrapper_4d_tile_1d(void* functor, + size_t i, size_t j, size_t k, size_t range_l, + size_t tile_l) +{ + (*static_cast(functor))(i, j, k, range_l, tile_l); +} + +template +void call_wrapper_4d_tile_2d(void* functor, + size_t i, size_t j, size_t range_k, size_t range_l, + size_t tile_k, size_t tile_l) +{ + (*static_cast(functor))(i, j, range_k, range_l, tile_k, tile_l); +} + +template +void call_wrapper_5d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m) { + (*static_cast(functor))(i, j, k, l, m); +} + +template +void call_wrapper_5d_tile_1d(void* functor, + size_t i, size_t j, size_t k, size_t l, size_t range_m, + size_t tile_m) +{ + (*static_cast(functor))(i, j, k, l, range_m, tile_m); +} + +template +void call_wrapper_5d_tile_2d(void* functor, + size_t i, size_t j, size_t k, size_t range_l, size_t range_m, + size_t tile_l, size_t tile_m) +{ + (*static_cast(functor))(i, j, k, range_l, range_m, tile_l, tile_m); +} + +template +void call_wrapper_6d(void* functor, size_t i, size_t j, size_t k, size_t l, size_t m, size_t n) { + (*static_cast(functor))(i, j, k, l, m, n); +} + +template +void call_wrapper_6d_tile_1d(void* functor, + size_t i, size_t j, size_t k, size_t l, size_t m, size_t range_n, + size_t tile_n) +{ + (*static_cast(functor))(i, j, k, l, m, range_n, tile_n); +} + +template +void call_wrapper_6d_tile_2d(void* functor, + size_t i, size_t j, size_t k, size_t l, size_t range_m, size_t range_n, + size_t tile_m, size_t tile_n) +{ + (*static_cast(functor))(i, j, k, l, range_m, range_n, tile_m, tile_n); +} + +} /* namespace */ +} /* namespace detail */ +} /* namespace libpthreadpool */ + +/** + * Process items on a 1D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i++) + * functor(i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each item. + * @param range the number of items on the 1D grid to process. The + * specified functor will be called once for each item. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range, + uint32_t flags = 0) +{ + pthreadpool_parallelize_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_1d, + const_cast(static_cast(&functor)), + range, + flags); +} + +/** + * Process items on a 1D grid with specified maximum tile size. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range; i += tile) + * functor(i, min(range - i, tile)); + * + * When the call returns, all items have been processed and the thread pool is + * ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, + * the calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range the number of items on the 1D grid to process. + * @param tile the maximum number of items on the 1D grid to process in + * one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_1d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range, + size_t tile, + uint32_t flags = 0) +{ + pthreadpool_parallelize_1d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_1d_tile_1d, + const_cast(static_cast(&functor)), + range, + tile, + flags); +} + +/** + * Process items on a 2D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * functor(i, j); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each item. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + uint32_t flags = 0) +{ + pthreadpool_parallelize_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + flags); +} + +/** + * Process items on a 2D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * functor(i, j, min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. 
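+ *
+ * For illustration only (pool, data, range_i, range_j and tile_j are
+ * placeholder names), a lambda can be passed directly as the functor:
+ *
+ *   pthreadpool_parallelize_2d_tile_1d(pool,
+ *     [data, range_j](size_t i, size_t j, size_t tile) {
+ *       for (size_t jj = j; jj < j + tile; jj++)
+ *         data[i * range_j + jj] *= 2.0f;
+ *     },
+ *     range_i, range_j, tile_j);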
+ * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_2d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t tile_j, + uint32_t flags = 0) +{ + pthreadpool_parallelize_2d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_2d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + tile_j, + flags); +} + +/** + * Process items on a 2D grid with the specified maximum tile size along each + * grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i += tile_i) + * for (size_t j = 0; j < range_j; j += tile_j) + * functor(i, j, + * min(range_i - i, tile_i), min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 2D grid. + * @param range_j the number of items to process along the second dimension + * of the 2D grid. + * @param tile_j the maximum number of items along the first dimension of + * the 2D grid to process in one functor call. + * @param tile_j the maximum number of items along the second dimension of + * the 2D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_2d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags = 0) +{ + pthreadpool_parallelize_2d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_2d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + tile_i, + tile_j, + flags); +} + +/** + * Process items on a 3D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * functor(i, j, k); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + flags); +} + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * functor(i, j, k, min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + tile_k, + flags); +} + +/** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * functor(i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. 
+ * @param range_i the number of items to process along the first dimension + * of the 3D grid. + * @param range_j the number of items to process along the second dimension + * of the 3D grid. + * @param range_k the number of items to process along the third dimension + * of the 3D grid. + * @param tile_j the maximum number of items along the second dimension of + * the 3D grid to process in one functor call. + * @param tile_k the maximum number of items along the third dimension of + * the 3D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_3d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags = 0) +{ + pthreadpool_parallelize_3d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_3d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + tile_j, + tile_k, + flags); +} + +/** + * Process items on a 4D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * functor(i, j, k, l); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + flags); +} + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * functor(i, j, k, l, min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + tile_l, + flags); +} + +/** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * functor(i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 4D grid. + * @param range_j the number of items to process along the second dimension + * of the 4D grid. + * @param range_k the number of items to process along the third dimension + * of the 4D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 4D grid. + * @param tile_k the maximum number of items along the third dimension of + * the 4D grid to process in one functor call. + * @param tile_l the maximum number of items along the fourth dimension of + * the 4D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_4d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags = 0) +{ + pthreadpool_parallelize_4d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_4d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + tile_k, + tile_l, + flags); +} + +/** + * Process items on a 5D grid. 
+ * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * functor(i, j, k, l, m); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + flags); +} + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last grid dimension. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * functor(i, j, k, l, m, min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one functor call. 
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + tile_m, + flags); +} + +/** + * Process items on a 5D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l += tile_l) + * for (size_t m = 0; m < range_m; m += tile_m) + * functor(i, j, k, l, m, + * min(range_l - l, tile_l), min(range_m - m, tile_m)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 5D grid. + * @param range_j the number of items to process along the second dimension + * of the 5D grid. + * @param range_k the number of items to process along the third dimension + * of the 5D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 5D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 5D grid. + * @param tile_l the maximum number of items along the fourth dimension of + * the 5D grid to process in one functor call. + * @param tile_m the maximum number of items along the fifth dimension of + * the 5D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_5d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t tile_l, + size_t tile_m, + uint32_t flags = 0) +{ + pthreadpool_parallelize_5d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_5d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + tile_l, + tile_m, + flags); +} + +/** + * Process items on a 6D grid. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m++) + * for (size_t n = 0; n < range_n; n++) + * functor(i, j, k, l, m, n); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. 
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param flags a bitwise combination of zero or more optional flags
+ * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+template
+inline void pthreadpool_parallelize_6d(
+  pthreadpool_t threadpool,
+  const T& functor,
+  size_t range_i,
+  size_t range_j,
+  size_t range_k,
+  size_t range_l,
+  size_t range_m,
+  size_t range_n,
+  uint32_t flags = 0)
+{
+  pthreadpool_parallelize_6d(
+    threadpool,
+    &libpthreadpool::detail::call_wrapper_6d,
+    const_cast(static_cast(&functor)),
+    range_i,
+    range_j,
+    range_k,
+    range_l,
+    range_m,
+    range_n,
+    flags);
+}
+
+/**
+ * Process items on a 6D grid with the specified maximum tile size along the
+ * last grid dimension.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j++)
+ *       for (size_t k = 0; k < range_k; k++)
+ *         for (size_t l = 0; l < range_l; l++)
+ *           for (size_t m = 0; m < range_m; m++)
+ *             for (size_t n = 0; n < range_n; n += tile_n)
+ *               functor(i, j, k, l, m, n, min(range_n - n, tile_n));
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ * calls are serialized.
+ *
+ * @param threadpool the thread pool to use for parallelisation. If threadpool
+ * is NULL, all items are processed serially on the calling thread.
+ * @param functor the functor to call for each tile.
+ * @param range_i the number of items to process along the first dimension
+ * of the 6D grid.
+ * @param range_j the number of items to process along the second dimension
+ * of the 6D grid.
+ * @param range_k the number of items to process along the third dimension
+ * of the 6D grid.
+ * @param range_l the number of items to process along the fourth dimension
+ * of the 6D grid.
+ * @param range_m the number of items to process along the fifth dimension
+ * of the 6D grid.
+ * @param range_n the number of items to process along the sixth dimension
+ * of the 6D grid.
+ * @param tile_n the maximum number of items along the sixth dimension of
+ * the 6D grid to process in one functor call.
+ * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_6d_tile_1d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_n, + uint32_t flags = 0) +{ + pthreadpool_parallelize_6d_tile_1d( + threadpool, + &libpthreadpool::detail::call_wrapper_6d_tile_1d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + range_n, + tile_n, + flags); +} + +/** + * Process items on a 6D grid with the specified maximum tile size along the + * last two grid dimensions. + * + * The function implements a parallel version of the following snippet: + * + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k++) + * for (size_t l = 0; l < range_l; l++) + * for (size_t m = 0; m < range_m; m += tile_m) + * for (size_t n = 0; n < range_n; n += tile_n) + * functor(i, j, k, l, m, n, + * min(range_m - m, tile_m), min(range_n - n, tile_n)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If threadpool + * is NULL, all items are processed serially on the calling thread. + * @param functor the functor to call for each tile. + * @param range_i the number of items to process along the first dimension + * of the 6D grid. + * @param range_j the number of items to process along the second dimension + * of the 6D grid. + * @param range_k the number of items to process along the third dimension + * of the 6D grid. + * @param range_l the number of items to process along the fourth dimension + * of the 6D grid. + * @param range_m the number of items to process along the fifth dimension + * of the 6D grid. + * @param range_n the number of items to process along the sixth dimension + * of the 6D grid. + * @param tile_m the maximum number of items along the fifth dimension of + * the 6D grid to process in one functor call. + * @param tile_n the maximum number of items along the sixth dimension of + * the 6D grid to process in one functor call. + * @param flags a bitwise combination of zero or more optional flags + * (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +template +inline void pthreadpool_parallelize_6d_tile_2d( + pthreadpool_t threadpool, + const T& functor, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t range_m, + size_t range_n, + size_t tile_m, + size_t tile_n, + uint32_t flags = 0) +{ + pthreadpool_parallelize_6d_tile_2d( + threadpool, + &libpthreadpool::detail::call_wrapper_6d_tile_2d, + const_cast(static_cast(&functor)), + range_i, + range_j, + range_k, + range_l, + range_m, + range_n, + tile_m, + tile_n, + flags); +} + +#endif /* __cplusplus */ + +#endif /* PTHREADPOOL_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack.h b/llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..591fa68eba5a3c8a6b22c12c4fa6efbefd098b84 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) Facebook, Inc. 
and its affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Status code for any QNNPACK function call. + */ +enum qnnp_status { + /** The call succeeded, and all output arguments now contain valid data. */ + qnnp_status_success = 0, + qnnp_status_uninitialized = 1, + qnnp_status_invalid_parameter = 2, + qnnp_status_unsupported_parameter = 3, + qnnp_status_unsupported_hardware = 4, + qnnp_status_out_of_memory = 5, +}; + +enum qnnp_status qnnp_initialize(void); + +enum qnnp_status qnnp_deinitialize(void); + +typedef struct qnnp_operator* qnnp_operator_t; + +enum qnnp_status qnnp_create_convolution2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* convolution); + +enum qnnp_status qnnp_setup_convolution2d_nhwc_q8( + qnnp_operator_t convolution, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_deconvolution2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t adjustment_height, + uint32_t adjustment_width, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* deconvolution); + +enum qnnp_status qnnp_setup_deconvolution2d_nhwc_q8( + qnnp_operator_t deconvolution, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_fully_connected_nc_q8( + size_t input_channels, + size_t output_channels, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* fully_connected); + +enum qnnp_status qnnp_setup_fully_connected_nc_q8( + qnnp_operator_t fully_connected, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_global_average_pooling_nwc_q8( + size_t channels, + uint8_t 
input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* global_average_pooling); + +enum qnnp_status qnnp_setup_global_average_pooling_nwc_q8( + qnnp_operator_t global_average_pooling, + size_t batch_size, + size_t width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_average_pooling2d_nhwc_q8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + size_t channels, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* average_pooling); + +enum qnnp_status qnnp_setup_average_pooling2d_nhwc_q8( + qnnp_operator_t average_pooling, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_max_pooling2d_nhwc_u8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + size_t channels, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* max_pooling); + +enum qnnp_status qnnp_setup_max_pooling2d_nhwc_u8( + qnnp_operator_t max_pooling, + size_t batch_size, + size_t input_height, + size_t input_width, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_create_channel_shuffle_nc_x8( + size_t groups, + size_t group_channels, + uint32_t flags, + qnnp_operator_t* channel_shuffle); + +enum qnnp_status qnnp_setup_channel_shuffle_nc_x8( + qnnp_operator_t channel_shuffle, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_add_nc_q8( + size_t channels, + uint8_t a_zero_point, + float a_scale, + uint8_t b_zero_point, + float b_scale, + uint8_t sum_zero_point, + float sum_scale, + uint8_t sum_min, + uint8_t sum_max, + uint32_t flags, + qnnp_operator_t* add); + +enum qnnp_status qnnp_setup_add_nc_q8( + qnnp_operator_t add, + size_t batch_size, + const uint8_t* a, + size_t a_stride, + const uint8_t* b, + size_t b_stride, + uint8_t* sum, + size_t sum_stride); + +enum qnnp_status qnnp_create_clamp_nc_u8( + size_t channels, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* clamp); + +enum qnnp_status qnnp_setup_clamp_nc_u8( + qnnp_operator_t clamp, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_sigmoid_nc_q8( + size_t channels, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* sigmoid); + +enum qnnp_status qnnp_setup_sigmoid_nc_q8( + qnnp_operator_t sigmoid, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum 
qnnp_status qnnp_create_leaky_relu_nc_q8( + size_t channels, + float negative_slope, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + qnnp_operator_t* leaky_relu); + +enum qnnp_status qnnp_setup_leaky_relu_nc_q8( + qnnp_operator_t leaky_relu, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_create_softargmax_nc_q8( + size_t channels, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + qnnp_operator_t* softargmax); + +enum qnnp_status qnnp_setup_softargmax_nc_q8( + qnnp_operator_t softargmax, + size_t batch_size, + const uint8_t* input, + size_t input_stride, + uint8_t* output, + size_t output_stride); + +enum qnnp_status qnnp_run_operator( + qnnp_operator_t op, + pthreadpool_t threadpool); + +enum qnnp_status qnnp_delete_operator( + qnnp_operator_t op); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack_func.h b/llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack_func.h new file mode 100644 index 0000000000000000000000000000000000000000..10bbc000192d7e03745e2cf3fb263a9655cde00c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack_func.h @@ -0,0 +1,166 @@ +#pragma once + +#include +#include + +namespace qnnpack { +class PrePackConvWeights final { + public: + PrePackConvWeights( + const pytorch_qnnp_operator_t convolution, + const uint8_t* kernel_zero_points, + const uint8_t* kernel, + const int32_t* bias); + + void* getPackedWeights() const + { + return packed_weights_; + } + + int64_t getOutputChannels() const + { + return output_channels_; + } + + ~PrePackConvWeights() + { + if (packed_weights_ != nullptr) { + free(packed_weights_); + } + } + + PrePackConvWeights() = delete; + PrePackConvWeights(const PrePackConvWeights&) = delete; + PrePackConvWeights& operator=(const PrePackConvWeights&) = delete; + + private: + void* packed_weights_ = nullptr; + int64_t output_channels_; +}; + +class PackBMatrix final { + public: + PackBMatrix( + size_t input_channels, + size_t output_channels, + const uint8_t* kernel_zero_points, + const float* requantization_scale, + const uint8_t* kernel, + const int32_t* bias); + + // This constructor is to be used for dynamic mode + // quantization. In dynamic mode, we dont yet support + // per channel quantization, and paying the cost of + // memory allocation for per channel zero point and + // requant scale will hurt performance. 
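Stepping back to the qnnp_* C API declared in qnnpack.h above: the usual lifecycle is qnnp_initialize, then create, setup, run, and finally delete the operator. The sketch below walks that lifecycle with the clamp operator because it takes the fewest parameters; the clamp bounds, the dense strides, the NULL thread pool, and the clamp_rows name are assumptions made for illustration.

#include <qnnpack.h>
#include <cstddef>
#include <cstdint>

// Clamp batch_size rows of `channels` uint8 values into [10, 200] using the
// operator lifecycle declared above: initialize -> create -> setup -> run -> delete.
inline bool clamp_rows(const uint8_t* input, uint8_t* output,
                       size_t batch_size, size_t channels) {
  if (qnnp_initialize() != qnnp_status_success) {
    return false;
  }
  qnnp_operator_t clamp_op = nullptr;
  if (qnnp_create_clamp_nc_u8(channels, /*output_min=*/10, /*output_max=*/200,
                              /*flags=*/0, &clamp_op) != qnnp_status_success) {
    return false;
  }
  // Rows are densely packed, so both strides equal the channel count.
  bool ok =
      qnnp_setup_clamp_nc_u8(clamp_op, batch_size,
                             input, /*input_stride=*/channels,
                             output, /*output_stride=*/channels) == qnnp_status_success &&
      // With a NULL thread pool the underlying pthreadpool calls run on this thread.
      qnnp_run_operator(clamp_op, /*threadpool=*/nullptr) == qnnp_status_success;
  qnnp_delete_operator(clamp_op);
  return ok;
}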
+ PackBMatrix( + size_t input_channels, + size_t output_channels, + const uint8_t kernel_zero_point, + const float requantization_scale, + const uint8_t* kernel, + const int32_t* bias); + + void* getPackedWeights() const + { + return packed_weights_; + } + + void unpackWeights( + const uint8_t* kernel_zero_points, + int8_t* kernel + ) const; + + size_t getInputChannels() const + { + return input_channels_; + } + + size_t getOutputChannels() const + { + return output_channels_; + } + + ~PackBMatrix() + { + if (packed_weights_ != nullptr) { + free(packed_weights_); + } + } + + PackBMatrix() = delete; + PackBMatrix(const PackBMatrix&) = delete; + PackBMatrix& operator=(const PackBMatrix&) = delete; + + private: + void* packed_weights_ = nullptr; + size_t input_channels_; + size_t output_channels_; +}; + +enum pytorch_qnnp_status qnnpackLinear( + const size_t batch_size, + const size_t input_channels, + const size_t output_channels, + const uint8_t input_zero_point, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + const uint8_t* input, + const size_t input_stride, + void* packed_weights, + uint8_t* output, + const size_t output_stride, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackConv( + const pytorch_qnnp_operator_t convolution, + void* packed_weights, + const size_t batch_size, + const size_t input_depth, + const size_t input_height, + const size_t input_width, + const uint8_t input_zero_point, + const uint8_t* input, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + uint8_t* output, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackDeConv( + const pytorch_qnnp_operator_t deconvolution, + void* packed_weights, + const size_t batch_size, + const size_t input_height, + const size_t input_width, + const uint8_t input_zero_point, + const uint8_t* input, + const uint8_t* kernel_zero_points, + const float* requantization_scales, + const uint8_t output_zero_point, + const uint8_t output_min, + const uint8_t output_max, + uint8_t* output, + pthreadpool_t threadpool); + +enum pytorch_qnnp_status qnnpackLinearDynamic( + const size_t batch_size, + const size_t input_channels, + const size_t output_channels, + const uint8_t input_zero_point, + const uint8_t* kernel_zero_points, + const float* dequantization_scales, + const uint8_t* input, + const size_t input_stride, + void* packed_weights, + const float* bias, + float* output, + const size_t output_stride, + pthreadpool_t threadpool); + +} // namespace qnnpack diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/sleef.h b/llmeval-env/lib/python3.10/site-packages/torch/include/sleef.h new file mode 100644 index 0000000000000000000000000000000000000000..de36514f991a5f9b4774b232a1a6350c47c2c74c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/sleef.h @@ -0,0 +1,4459 @@ +// Copyright Naoki Shibata and contributors 2010 - 2020. +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#ifndef __SLEEF_H__ +#define __SLEEF_H__ + +#define SLEEF_VERSION_MAJOR 3 +#define SLEEF_VERSION_MINOR 6 +#define SLEEF_VERSION_PATCHLEVEL 0 + +#include +#include + +#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER) +#define CONST const +#else +#define CONST +#endif + +#if defined(__AVX2__) || defined(__aarch64__) || defined(__arm__) || defined(__powerpc64__) || defined(__zarch__) +#ifndef FP_FAST_FMA +#define FP_FAST_FMA +#endif +#ifndef FP_FAST_FMAF +#define FP_FAST_FMAF +#endif +#endif + +#if defined(_MSC_VER) && !defined(__STDC__) +#define __STDC__ 1 +#endif + +#if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) +#ifdef IMPORT_IS_EXPORT +#define IMPORT __declspec(dllexport) +#else // #ifdef IMPORT_IS_EXPORT +#define IMPORT __declspec(dllimport) +#if (defined(_MSC_VER)) +#pragma comment(lib,"sleef.lib") +#endif // #if (defined(_MSC_VER)) +#endif // #ifdef IMPORT_IS_EXPORT +#else // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) +#define IMPORT +#endif // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) + +#if (defined(__GNUC__) || defined(__CLANG__)) && (defined(__i386__) || defined(__x86_64__)) +#include +#endif + +#if (defined(_MSC_VER)) +#include +#endif + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) +#include +#endif + +#if defined(__ARM_FEATURE_SVE) +#include +#endif + +#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__) +#include +typedef __vector double SLEEF_VECTOR_DOUBLE; +typedef __vector float SLEEF_VECTOR_FLOAT; +typedef __vector int SLEEF_VECTOR_INT; +typedef __vector unsigned int SLEEF_VECTOR_UINT; +typedef __vector long long SLEEF_VECTOR_LONGLONG; +typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG; +#endif + +#if defined(__VX__) && defined(__VEC__) +#ifndef SLEEF_VECINTRIN_H_INCLUDED +#include +#define SLEEF_VECINTRIN_H_INCLUDED +#endif +typedef __vector double SLEEF_VECTOR_DOUBLE; +typedef __vector float SLEEF_VECTOR_FLOAT; +typedef __vector int SLEEF_VECTOR_INT; +typedef __vector unsigned int SLEEF_VECTOR_UINT; +typedef __vector long long SLEEF_VECTOR_LONGLONG; +typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG; +#endif + +// + +#ifndef SLEEF_FP_ILOGB0 +#define SLEEF_FP_ILOGB0 ((int)-2147483648) +#endif + +#ifndef SLEEF_FP_ILOGBNAN +#define SLEEF_FP_ILOGBNAN ((int)2147483647) +#endif + +// + +IMPORT void *Sleef_malloc(size_t z); +IMPORT void Sleef_free(void *ptr); +IMPORT uint64_t Sleef_currentTimeMicros(); + +#if defined(__i386__) || defined(__x86_64__) || defined(_MSC_VER) +IMPORT void Sleef_x86CpuID(int32_t out[4], uint32_t eax, uint32_t ecx); +#endif + +// + +#ifndef Sleef_double2_DEFINED +#define Sleef_double2_DEFINED +typedef struct { + double x, y; +} Sleef_double2; +#endif + +#ifndef Sleef_float2_DEFINED +#define Sleef_float2_DEFINED +typedef struct { + float x, y; +} Sleef_float2; +#endif + +#ifndef Sleef_longdouble2_DEFINED +#define Sleef_longdouble2_DEFINED +typedef struct { + long double x, y; +} Sleef_longdouble2; +#endif + +#if !defined(Sleef_quad_DEFINED) +#define Sleef_quad_DEFINED +#if defined(__SIZEOF_FLOAT128__) || (defined(__linux__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(__PPC64__) && defined(__GNUC__) && 
!defined(__clang__) && __GNUC__ >= 8) +typedef __float128 Sleef_quad; +#define SLEEF_QUAD_C(x) (x ## Q) +//#elif defined(__SIZEOF_LONG_DOUBLE__) && defined(__aarch64__) +//typedef long double Sleef_quad; +//#define SLEEF_QUAD_C(x) (x ## L) +#else +typedef struct { uint64_t x, y; } Sleef_quad; +#endif +#endif + +#if !defined(Sleef_quad2_DEFINED) +#define Sleef_quad2_DEFINED +typedef union { + struct { + Sleef_quad x, y; + }; + Sleef_quad s[2]; +} Sleef_quad2; +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + +IMPORT CONST double Sleef_sin_u35(double); +IMPORT CONST double Sleef_cos_u35(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u35(double); +IMPORT CONST double Sleef_tan_u35(double); +IMPORT CONST double Sleef_asin_u35(double); +IMPORT CONST double Sleef_acos_u35(double); +IMPORT CONST double Sleef_atan_u35(double); +IMPORT CONST double Sleef_atan2_u35(double, double); +IMPORT CONST double Sleef_log_u35(double); +IMPORT CONST double Sleef_cbrt_u35(double); +IMPORT CONST double Sleef_sin_u10(double); +IMPORT CONST double Sleef_cos_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u10(double); +IMPORT CONST double Sleef_tan_u10(double); +IMPORT CONST double Sleef_asin_u10(double); +IMPORT CONST double Sleef_acos_u10(double); +IMPORT CONST double Sleef_atan_u10(double); +IMPORT CONST double Sleef_atan2_u10(double, double); +IMPORT CONST double Sleef_log_u10(double); +IMPORT CONST double Sleef_cbrt_u10(double); +IMPORT CONST double Sleef_exp_u10(double); +IMPORT CONST double Sleef_pow_u10(double, double); +IMPORT CONST double Sleef_sinh_u10(double); +IMPORT CONST double Sleef_cosh_u10(double); +IMPORT CONST double Sleef_tanh_u10(double); +IMPORT CONST double Sleef_sinh_u35(double); +IMPORT CONST double Sleef_cosh_u35(double); +IMPORT CONST double Sleef_tanh_u35(double); +IMPORT CONST double Sleef_asinh_u10(double); +IMPORT CONST double Sleef_acosh_u10(double); +IMPORT CONST double Sleef_atanh_u10(double); +IMPORT CONST double Sleef_exp2_u10(double); +IMPORT CONST double Sleef_exp10_u10(double); +IMPORT CONST double Sleef_exp2_u35(double); +IMPORT CONST double Sleef_exp10_u35(double); +IMPORT CONST double Sleef_expm1_u10(double); +IMPORT CONST double Sleef_log10_u10(double); +IMPORT CONST double Sleef_log2_u10(double); +IMPORT CONST double Sleef_log2_u35(double); +IMPORT CONST double Sleef_log1p_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u05(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u35(double); +IMPORT CONST double Sleef_sinpi_u05(double); +IMPORT CONST double Sleef_cospi_u05(double); +IMPORT CONST double Sleef_ldexp(double, int); +IMPORT CONST int Sleef_ilogb(double); +IMPORT CONST double Sleef_fma(double, double, double); +IMPORT CONST double Sleef_sqrt(double); +IMPORT CONST double Sleef_sqrt_u05(double); +IMPORT CONST double Sleef_sqrt_u35(double); + +IMPORT CONST double Sleef_hypot_u05(double, double); +IMPORT CONST double Sleef_hypot_u35(double, double); + +IMPORT CONST double Sleef_fabs(double); +IMPORT CONST double Sleef_copysign(double, double); +IMPORT CONST double Sleef_fmax(double, double); +IMPORT CONST double Sleef_fmin(double, double); +IMPORT CONST double Sleef_fdim(double, double); +IMPORT CONST double Sleef_trunc(double); +IMPORT CONST double Sleef_floor(double); +IMPORT CONST double Sleef_ceil(double); +IMPORT CONST double Sleef_round(double); +IMPORT CONST double Sleef_rint(double); +IMPORT CONST double Sleef_nextafter(double, double); +IMPORT CONST double Sleef_frfrexp(double); +IMPORT CONST int Sleef_expfrexp(double); +IMPORT CONST 
double Sleef_fmod(double, double); +IMPORT CONST double Sleef_remainder(double, double); +IMPORT CONST Sleef_double2 Sleef_modf(double); + +IMPORT CONST double Sleef_lgamma_u10(double); +IMPORT CONST double Sleef_tgamma_u10(double); +IMPORT CONST double Sleef_erf_u10(double); +IMPORT CONST double Sleef_erfc_u15(double); + +IMPORT CONST float Sleef_sinf_u35(float); +IMPORT CONST float Sleef_cosf_u35(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u35(float); +IMPORT CONST float Sleef_tanf_u35(float); +IMPORT CONST float Sleef_asinf_u35(float); +IMPORT CONST float Sleef_acosf_u35(float); +IMPORT CONST float Sleef_atanf_u35(float); +IMPORT CONST float Sleef_atan2f_u35(float, float); +IMPORT CONST float Sleef_logf_u35(float); +IMPORT CONST float Sleef_cbrtf_u35(float); +IMPORT CONST float Sleef_sinf_u10(float); +IMPORT CONST float Sleef_cosf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u10(float); +IMPORT CONST float Sleef_fastsinf_u3500(float); +IMPORT CONST float Sleef_fastcosf_u3500(float); +IMPORT CONST float Sleef_tanf_u10(float); +IMPORT CONST float Sleef_asinf_u10(float); +IMPORT CONST float Sleef_acosf_u10(float); +IMPORT CONST float Sleef_atanf_u10(float); +IMPORT CONST float Sleef_atan2f_u10(float, float); +IMPORT CONST float Sleef_logf_u10(float); +IMPORT CONST float Sleef_cbrtf_u10(float); +IMPORT CONST float Sleef_expf_u10(float); +IMPORT CONST float Sleef_powf_u10(float, float); +IMPORT CONST float Sleef_fastpowf_u3500(float, float); +IMPORT CONST float Sleef_sinhf_u10(float); +IMPORT CONST float Sleef_coshf_u10(float); +IMPORT CONST float Sleef_tanhf_u10(float); +IMPORT CONST float Sleef_sinhf_u35(float); +IMPORT CONST float Sleef_coshf_u35(float); +IMPORT CONST float Sleef_tanhf_u35(float); +IMPORT CONST float Sleef_asinhf_u10(float); +IMPORT CONST float Sleef_acoshf_u10(float); +IMPORT CONST float Sleef_atanhf_u10(float); +IMPORT CONST float Sleef_exp2f_u10(float); +IMPORT CONST float Sleef_exp10f_u10(float); +IMPORT CONST float Sleef_exp2f_u35(float); +IMPORT CONST float Sleef_exp10f_u35(float); +IMPORT CONST float Sleef_expm1f_u10(float); +IMPORT CONST float Sleef_log10f_u10(float); +IMPORT CONST float Sleef_log2f_u10(float); +IMPORT CONST float Sleef_log2f_u35(float); +IMPORT CONST float Sleef_log1pf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u05(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u35(float); +IMPORT CONST float Sleef_sinpif_u05(float d); +IMPORT CONST float Sleef_cospif_u05(float d); +IMPORT CONST float Sleef_ldexpf(float, int); +IMPORT CONST int Sleef_ilogbf(float); +IMPORT CONST float Sleef_fmaf(float, float, float); +IMPORT CONST float Sleef_sqrtf(float); +IMPORT CONST float Sleef_sqrtf_u05(float); +IMPORT CONST float Sleef_sqrtf_u35(float); + +IMPORT CONST float Sleef_hypotf_u05(float, float); +IMPORT CONST float Sleef_hypotf_u35(float, float); + +IMPORT CONST float Sleef_fabsf(float); +IMPORT CONST float Sleef_copysignf(float, float); +IMPORT CONST float Sleef_fmaxf(float, float); +IMPORT CONST float Sleef_fminf(float, float); +IMPORT CONST float Sleef_fdimf(float, float); +IMPORT CONST float Sleef_truncf(float); +IMPORT CONST float Sleef_floorf(float); +IMPORT CONST float Sleef_ceilf(float); +IMPORT CONST float Sleef_roundf(float); +IMPORT CONST float Sleef_rintf(float); +IMPORT CONST float Sleef_nextafterf(float, float); +IMPORT CONST float Sleef_frfrexpf(float); +IMPORT CONST int Sleef_expfrexpf(float); +IMPORT CONST float Sleef_fmodf(float, float); +IMPORT CONST float Sleef_remainderf(float, float); +IMPORT CONST Sleef_float2 
Sleef_modff(float); + +IMPORT CONST float Sleef_lgammaf_u10(float); +IMPORT CONST float Sleef_tgammaf_u10(float); +IMPORT CONST float Sleef_erff_u10(float); +IMPORT CONST float Sleef_erfcf_u15(float); + +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u05(long double); +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u35(long double); + +#if defined(Sleef_quad2_DEFINED) +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u05(Sleef_quad); +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u35(Sleef_quad); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35(__m128d); 
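A note on the naming pattern in the block above: the _u10/_u35 suffixes indicate accuracy targets of roughly 1.0 and 3.5 ULP respectively, the scalar entry points take double/float, and the d2/f4 entry points take SSE2 __m128d/__m128 vectors. A minimal sketch, assuming an SSE2-capable x86 target and the scalar declarations earlier in this header:

#include <sleef.h>
#include <emmintrin.h>  // __m128d and the _mm_* helpers used below
#include <cstdio>

int main() {
  // Scalar: 1.0-ULP sine, and sine/cosine computed together as a Sleef_double2.
  double s = Sleef_sin_u10(1.0);
  Sleef_double2 sc = Sleef_sincos_u35(1.0);

  // Vector: the same sine evaluated for two doubles packed into an __m128d.
  __m128d x = _mm_set_pd(2.0, 1.0);
  __m128d y = Sleef_sind2_u10(x);

  double out[2];
  _mm_storeu_pd(out, y);
  std::printf("%f %f %f %f %f\n", s, sc.x, sc.y, out[0], out[1]);
  return 0;
}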
+IMPORT CONST __m128d Sleef_cinz_tanhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2(__m128d); +IMPORT CONST __m128d Sleef_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d 
Sleef_cinz_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2(__m128d); +IMPORT CONST __m128d Sleef_floord2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2(__m128d); +IMPORT CONST __m128d Sleef_ceild2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2(__m128d); +IMPORT CONST __m128d Sleef_roundd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2(__m128d); +IMPORT CONST __m128d Sleef_rintd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15(__m128d); +IMPORT CONST int Sleef_getIntd2(int); +IMPORT CONST void *Sleef_getPtrd2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10(__m128); +IMPORT 
CONST __m128 Sleef_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35(__m128); +IMPORT CONST __m128 
Sleef_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4(__m128); +IMPORT CONST __m128 Sleef_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4(__m128); +IMPORT CONST __m128 Sleef_floorf4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4(__m128); +IMPORT CONST __m128 Sleef_ceilf4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4(__m128); +IMPORT CONST __m128 Sleef_roundf4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4(__m128); +IMPORT CONST __m128 Sleef_rintf4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15(__m128); +IMPORT CONST int Sleef_getIntf4(int); +IMPORT CONST int Sleef_cinz_getIntf4(int); +IMPORT CONST void *Sleef_getPtrf4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse2(__m128d); +IMPORT CONST __m128d 
Sleef_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d 
Sleef_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 
Sleef_modfd2_sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse2(__m128d); +IMPORT CONST int Sleef_getIntd2_sse2(int); +IMPORT CONST void *Sleef_getPtrd2_sse2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 
Sleef_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse2(__m128, __m128); +IMPORT 
CONST __m128 Sleef_cinz_fminf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse2(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse2(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse2(__m128); +IMPORT CONST int Sleef_getIntf4_sse2(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse2(int); +IMPORT CONST void *Sleef_getPtrf4_sse2(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse4(__m128d); 
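/* Editorial note: a minimal usage sketch, not part of the vendored header or of
 * this patch. It assumes a libsleef build is linked in and uses names taken from
 * the declarations above. The _uNN suffix encodes the accuracy bound of each
 * variant (u10 roughly 1.0 ULP, u35 roughly 3.5 ULP), while the trailing
 * sse2/sse4/avx suffix pins an ISA-specific implementation of the same function;
 * the unsuffixed names declared earlier are the generic entry points. */
#include <sleef.h>
#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128d x = _mm_set_pd(2.0, 1.0);       /* two doubles per __m128d lane set */
    __m128d y = Sleef_sind2_u10sse2(x);     /* 1.0-ULP sine, SSE2-suffixed variant */
    double out[2];
    _mm_storeu_pd(out, y);                  /* low lane first: sin(1.0), then sin(2.0) */
    printf("sin(1)=%f sin(2)=%f\n", out[0], out[1]);
    return 0;
}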
+IMPORT CONST __m128d Sleef_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 
Sleef_cinz_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d 
Sleef_cinz_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse4(__m128d); +IMPORT CONST int Sleef_getIntd2_sse4(int); +IMPORT CONST void *Sleef_getPtrd2_sse4(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse4(__m128); 
+IMPORT CONST __m128 Sleef_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse4(__m128); +IMPORT CONST __m128 
Sleef_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse4(__m128); +IMPORT CONST int Sleef_getIntf4_sse4(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse4(int); +IMPORT CONST void *Sleef_getPtrf4_sse4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d 
Sleef_cinz_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4(__m256d); +IMPORT CONST __m256d Sleef_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4(__m256d); +IMPORT 
CONST __m256d Sleef_cinz_sqrtd4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4(__m256d); +IMPORT CONST __m256d Sleef_floord4(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4(__m256d); +IMPORT CONST __m256d Sleef_ceild4(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4(__m256d); +IMPORT CONST __m256d Sleef_roundd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4(__m256d); +IMPORT CONST __m256d Sleef_rintd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15(__m256d); +IMPORT CONST int Sleef_getIntd4(int); +IMPORT CONST void *Sleef_getPtrd4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35(__m256); +IMPORT CONST 
__m256 Sleef_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35(__m256); +IMPORT CONST __m256 
Sleef_cinz_log2f8_u35(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8(__m256); +IMPORT CONST __m256 Sleef_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8(__m256); +IMPORT CONST __m256 Sleef_floorf8(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8(__m256); +IMPORT CONST __m256 Sleef_ceilf8(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8(__m256); +IMPORT CONST __m256 Sleef_roundf8(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8(__m256); +IMPORT CONST __m256 Sleef_rintf8(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8(__m256); +IMPORT CONST __m256 Sleef_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15(__m256); +IMPORT CONST int Sleef_getIntf8(int); +IMPORT CONST int Sleef_cinz_getIntf8(int); +IMPORT CONST void *Sleef_getPtrf8(int); +IMPORT CONST void *Sleef_cinz_getPtrf8(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx(__m256d); +IMPORT CONST 
__m256d Sleef_cinz_sind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10avx(__m256d); 
+IMPORT CONST __m256d Sleef_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4_avx(__m256d); 
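/* Editorial note: a second illustrative sketch, again not part of the patch,
 * showing the two-vector return convention used by the sincos-style functions
 * declared above. Sleef_sincosd4_u10avx returns a Sleef___m256d_2 (the struct
 * typedef'd earlier in this header); by SLEEF convention the .x member holds
 * the sine lanes and .y the cosine lanes. Assumes AVX hardware and a linked
 * libsleef. */
#include <sleef.h>
#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m256d x = _mm256_set_pd(4.0, 3.0, 2.0, 1.0);  /* four doubles per __m256d */
    Sleef___m256d_2 sc = Sleef_sincosd4_u10avx(x);  /* sine in .x, cosine in .y */
    double s[4], c[4];
    _mm256_storeu_pd(s, sc.x);
    _mm256_storeu_pd(c, sc.y);
    for (int i = 0; i < 4; i++)
        printf("sin=%f cos=%f\n", s[i], c[i]);
    return 0;
}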
+IMPORT CONST __m256d Sleef_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4_avx(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4_avx(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15avx(__m256d); +IMPORT CONST int Sleef_getIntd4_avx(int); +IMPORT CONST void *Sleef_getPtrd4_avx(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10avx(__m256, __m256); +IMPORT CONST 
__m256 Sleef_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx(__m256); +IMPORT CONST 
__m256 Sleef_cinz_sqrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8_avx(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8_avx(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15avx(__m256); +IMPORT CONST int Sleef_getIntf8_avx(int); +IMPORT CONST int Sleef_cinz_getIntf8_avx(int); +IMPORT CONST void *Sleef_getPtrf8_avx(int); +IMPORT CONST void *Sleef_cinz_getPtrf8_avx(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35fma4(__m256d, 
__m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d 
Sleef_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_fma4(__m256d, __m256d); +IMPORT 
CONST __m256d Sleef_finz_fmodd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15fma4(__m256d); +IMPORT CONST int Sleef_getIntd4_fma4(int); +IMPORT CONST void *Sleef_getPtrd4_fma4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 
Sleef_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 
Sleef_finz_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_fma4(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_fma4(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15fma4(__m256); +IMPORT CONST int Sleef_getIntf8_fma4(int); +IMPORT CONST int Sleef_finz_getIntf8_fma4(int); +IMPORT CONST void *Sleef_getPtrf8_fma4(int); +IMPORT CONST void *Sleef_finz_getPtrf8_fma4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_cosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d 
Sleef_log1pd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15avx2(__m256d); +IMPORT CONST int Sleef_getIntd4_avx2(int); +IMPORT CONST void *Sleef_getPtrd4_avx2(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx2(__m256); +IMPORT 
CONST __m256 Sleef_finz_tanhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx2(__m256); +IMPORT CONST __m256 
Sleef_finz_truncf8_avx2(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_avx2(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_avx2(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15avx2(__m256); +IMPORT CONST int Sleef_getIntf8_avx2(int); +IMPORT CONST int Sleef_finz_getIntf8_avx2(int); +IMPORT CONST void *Sleef_getPtrf8_avx2(int); +IMPORT CONST void *Sleef_finz_getPtrf8_avx2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log1pd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05avx2128(__m128d); +IMPORT CONST 
Sleef___m128d_2 Sleef_finz_sincospid2_u05avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincospid2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128d Sleef_finz_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_modfd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfcd2_u15avx2128(__m128d); +IMPORT CONST int Sleef_getIntd2_avx2128(int); +IMPORT CONST void *Sleef_getPtrd2_avx2128(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 
Sleef_finz_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log1pf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_avx2128(__m128, __m128); 
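These avx2128 declarations are the 128-bit single-precision entry points, and several of them return the two-vector Sleef___m128_2 struct defined just above. As a hedged sketch (again not part of the header), the snippet below calls Sleef_sincosf4_u10avx2128, assuming SLEEF's usual convention that the .x member carries the sine and .y the cosine, and assuming an AVX2-capable build linked against libsleef.

#include <stdio.h>
#include <immintrin.h>   /* __m128 and the _mm_* intrinsics */
#include <sleef.h>

int main(void) {
    float in[4] = {0.0f, 0.25f, 0.5f, 1.0f};
    float s[4], c[4];

    __m128 v = _mm_loadu_ps(in);                        /* four single-precision lanes */
    Sleef___m128_2 sc = Sleef_sincosf4_u10avx2128(v);   /* assumed: .x = sin, .y = cos */
    _mm_storeu_ps(s, sc.x);
    _mm_storeu_ps(c, sc.y);

    for (int i = 0; i < 4; i++)
        printf("sincos(%g) = (%.9g, %.9g)\n", in[i], s[i], c[i]);
    return 0;
}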
+IMPORT CONST __m128 Sleef_finz_fmaxf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_remainderf4_avx2128(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_modff4_avx2128(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erfcf4_u15avx2128(__m128); +IMPORT CONST int Sleef_getIntf4_avx2128(int); +IMPORT CONST int Sleef_finz_getIntf4_avx2128(int); +IMPORT CONST void *Sleef_getPtrf4_avx2128(int); +IMPORT CONST void *Sleef_finz_getPtrf4_avx2128(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10(__m512d); +IMPORT CONST __m512d 
Sleef_finz_cosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_finz_sincospid8_u35(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8(__m512d); +IMPORT CONST __m512d Sleef_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8(__m512d); +IMPORT CONST __m512d Sleef_floord8(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8(__m512d); +IMPORT CONST __m512d Sleef_ceild8(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8(__m512d); +IMPORT CONST __m512d Sleef_roundd8(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8(__m512d); +IMPORT CONST __m512d Sleef_rintd8(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15(__m512d); +IMPORT CONST int Sleef_getIntd8(int); +IMPORT CONST void *Sleef_getPtrd8(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35(__m512); +IMPORT CONST __m512 
Sleef_finz_sinf16_u35(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16(__m512); +IMPORT CONST __m512 Sleef_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16(__m512); +IMPORT CONST __m512 Sleef_floorf16(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16(__m512); +IMPORT CONST __m512 Sleef_ceilf16(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16(__m512); +IMPORT CONST __m512 Sleef_roundf16(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16(__m512); +IMPORT CONST __m512 Sleef_rintf16(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16(__m512); +IMPORT CONST __m512 Sleef_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16(__m512); +IMPORT CONST __m512 
Sleef_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15(__m512); +IMPORT CONST int Sleef_getIntf16(int); +IMPORT CONST int Sleef_finz_getIntf16(int); +IMPORT CONST void *Sleef_getPtrf16(int); +IMPORT CONST void *Sleef_finz_getPtrf16(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512f(__m512d); 
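
The __AVX512F__ blocks declare the same function set twice: once with plain names (Sleef_sind8_u35, Sleef_powd8_u10, ...) and once with an explicit avx512f suffix, each accompanied by a Sleef_finz_-prefixed companion. A hedged sketch of the 8-wide double-precision path follows; it is not part of the header, and it assumes compilation with AVX-512F enabled (e.g. -mavx512f) and linking against libsleef.

#include <immintrin.h>
#include <stdio.h>
#include <sleef.h>   /* assumed include name for the header added in this diff */

int main(void) {
    double base[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    double expo[8] = {0.5, 0.5, 0.5, 0.5, 2.0, 2.0, 2.0, 2.0};
    __m512d b = _mm512_loadu_pd(base);           /* 8 doubles per ZMM register */
    __m512d e = _mm512_loadu_pd(expo);

    /* 1.0-ULP pow over 8 doubles via the explicitly suffixed avx512f entry point */
    __m512d r = Sleef_powd8_u10avx512f(b, e);

    double out[8];
    _mm512_storeu_pd(out, r);
    for (int i = 0; i < 8; i++)
        printf("pow(%g, %g) ~= %g\n", base[i], expo[i], out[i]);
    return 0;
}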
+IMPORT CONST __m512d Sleef_finz_coshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512f(__m512d); +IMPORT CONST 
__m512d Sleef_finz_sqrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15avx512f(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512f(int); +IMPORT CONST void *Sleef_getPtrd8_avx512f(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 
Sleef_finz_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512f(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST 
__m512 Sleef_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16_avx512f(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16_avx512f(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15avx512f(__m512); +IMPORT CONST int Sleef_getIntf16_avx512f(int); +IMPORT CONST int Sleef_finz_getIntf16_avx512f(int); +IMPORT CONST void *Sleef_getPtrf16_avx512f(int); +IMPORT CONST void *Sleef_finz_getPtrf16_avx512f(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d 
Sleef_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m512d Sleef_cinz_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_cinz_modfd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrd8_avx512fnofma(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512fnofma(__m512, __m512); 
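
The avx512fnofma block mirrors the avx512f one, but in this header the Sleef_cinz_-prefixed aliases accompany the nofma (and, further below, purec) variants, while Sleef_finz_ aliases accompany the FMA-based variants. The two-element return structs (Sleef___m512d_2 / Sleef___m512_2, members x and y) are used by the sincos-style functions. A hedged sketch under the same assumptions as above; per SLEEF's documented sincos convention, .x carries the sine results and .y the cosine results.

#include <immintrin.h>
#include <stdio.h>
#include <sleef.h>   /* assumed include name for the header added in this diff */

int main(void) {
    double rad[8];
    for (int i = 0; i < 8; i++)
        rad[i] = i * 3.141592653589793 / 8.0;    /* sample angles 0 .. 7*pi/8 */

    __m512d a = _mm512_loadu_pd(rad);

    /* one call computes both sine and cosine for all 8 lanes (no-FMA code path) */
    Sleef___m512d_2 sc = Sleef_sincosd8_u10avx512fnofma(a);

    double s[8], c[8];
    _mm512_storeu_pd(s, sc.x);                   /* .x = sin */
    _mm512_storeu_pd(c, sc.y);                   /* .y = cos */
    for (int i = 0; i < 8; i++)
        printf("x=%.4f  sin=%.6f  cos=%.6f\n", rad[i], s[i], c[i]);
    return 0;
}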
+IMPORT CONST __m512 Sleef_cinz_powf16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_coshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_coshf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanhf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fastsinf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fastcosf16_u3500avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fastpowf16_u3500avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acoshf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanhf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_exp10f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expm1f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log10f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log2f16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log2f16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_log1pf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u05avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinpif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cospif16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512fnofma(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmaf16_avx512fnofma(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512fnofma(__m512); +IMPORT CONST __m512 
Sleef_cinz_sqrtf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sqrtf16_u05avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sqrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_hypotf16_u05avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_hypotf16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_fabsf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_copysignf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmaxf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fminf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fdimf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_truncf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_floorf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_ceilf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_roundf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_rintf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_nextafterf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_frfrexpf16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_fmodf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_remainderf16_avx512fnofma(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_modff16_avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_lgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tgammaf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_erff16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_erfcf16_u15avx512fnofma(__m512); +IMPORT CONST int Sleef_getIntf16_avx512fnofma(int); +IMPORT CONST int Sleef_cinz_getIntf16_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrf16_avx512fnofma(int); +IMPORT CONST void *Sleef_cinz_getPtrf16_avx512fnofma(int); +#endif +#ifdef __STDC__ + +#ifndef Sleef_double_2_DEFINED +typedef struct { + double x, y; +} Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +IMPORT CONST double Sleef_sind1_u35purec(double); +IMPORT CONST double Sleef_cinz_sind1_u35purec(double); +IMPORT CONST double 
Sleef_cosd1_u35purec(double); +IMPORT CONST double Sleef_cinz_cosd1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u35purec(double); +IMPORT CONST double Sleef_tand1_u35purec(double); +IMPORT CONST double Sleef_cinz_tand1_u35purec(double); +IMPORT CONST double Sleef_asind1_u35purec(double); +IMPORT CONST double Sleef_cinz_asind1_u35purec(double); +IMPORT CONST double Sleef_acosd1_u35purec(double); +IMPORT CONST double Sleef_cinz_acosd1_u35purec(double); +IMPORT CONST double Sleef_atand1_u35purec(double); +IMPORT CONST double Sleef_cinz_atand1_u35purec(double); +IMPORT CONST double Sleef_atan2d1_u35purec(double, double); +IMPORT CONST double Sleef_cinz_atan2d1_u35purec(double, double); +IMPORT CONST double Sleef_logd1_u35purec(double); +IMPORT CONST double Sleef_cinz_logd1_u35purec(double); +IMPORT CONST double Sleef_cbrtd1_u35purec(double); +IMPORT CONST double Sleef_cinz_cbrtd1_u35purec(double); +IMPORT CONST double Sleef_sind1_u10purec(double); +IMPORT CONST double Sleef_cinz_sind1_u10purec(double); +IMPORT CONST double Sleef_cosd1_u10purec(double); +IMPORT CONST double Sleef_cinz_cosd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u10purec(double); +IMPORT CONST double Sleef_tand1_u10purec(double); +IMPORT CONST double Sleef_cinz_tand1_u10purec(double); +IMPORT CONST double Sleef_asind1_u10purec(double); +IMPORT CONST double Sleef_cinz_asind1_u10purec(double); +IMPORT CONST double Sleef_acosd1_u10purec(double); +IMPORT CONST double Sleef_cinz_acosd1_u10purec(double); +IMPORT CONST double Sleef_atand1_u10purec(double); +IMPORT CONST double Sleef_cinz_atand1_u10purec(double); +IMPORT CONST double Sleef_atan2d1_u10purec(double, double); +IMPORT CONST double Sleef_cinz_atan2d1_u10purec(double, double); +IMPORT CONST double Sleef_logd1_u10purec(double); +IMPORT CONST double Sleef_cinz_logd1_u10purec(double); +IMPORT CONST double Sleef_cbrtd1_u10purec(double); +IMPORT CONST double Sleef_cinz_cbrtd1_u10purec(double); +IMPORT CONST double Sleef_expd1_u10purec(double); +IMPORT CONST double Sleef_cinz_expd1_u10purec(double); +IMPORT CONST double Sleef_powd1_u10purec(double, double); +IMPORT CONST double Sleef_cinz_powd1_u10purec(double, double); +IMPORT CONST double Sleef_sinhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_sinhd1_u10purec(double); +IMPORT CONST double Sleef_coshd1_u10purec(double); +IMPORT CONST double Sleef_cinz_coshd1_u10purec(double); +IMPORT CONST double Sleef_tanhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_tanhd1_u10purec(double); +IMPORT CONST double Sleef_sinhd1_u35purec(double); +IMPORT CONST double Sleef_cinz_sinhd1_u35purec(double); +IMPORT CONST double Sleef_coshd1_u35purec(double); +IMPORT CONST double Sleef_cinz_coshd1_u35purec(double); +IMPORT CONST double Sleef_tanhd1_u35purec(double); +IMPORT CONST double Sleef_cinz_tanhd1_u35purec(double); +IMPORT CONST double Sleef_fastsind1_u3500purec(double); +IMPORT CONST double Sleef_cinz_fastsind1_u3500purec(double); +IMPORT CONST double Sleef_fastcosd1_u3500purec(double); +IMPORT CONST double Sleef_cinz_fastcosd1_u3500purec(double); +IMPORT CONST double Sleef_fastpowd1_u3500purec(double, double); +IMPORT CONST double Sleef_cinz_fastpowd1_u3500purec(double, double); +IMPORT CONST double Sleef_asinhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_asinhd1_u10purec(double); +IMPORT CONST double Sleef_acoshd1_u10purec(double); +IMPORT CONST double 
Sleef_cinz_acoshd1_u10purec(double); +IMPORT CONST double Sleef_atanhd1_u10purec(double); +IMPORT CONST double Sleef_cinz_atanhd1_u10purec(double); +IMPORT CONST double Sleef_exp2d1_u10purec(double); +IMPORT CONST double Sleef_cinz_exp2d1_u10purec(double); +IMPORT CONST double Sleef_exp2d1_u35purec(double); +IMPORT CONST double Sleef_cinz_exp2d1_u35purec(double); +IMPORT CONST double Sleef_exp10d1_u10purec(double); +IMPORT CONST double Sleef_cinz_exp10d1_u10purec(double); +IMPORT CONST double Sleef_exp10d1_u35purec(double); +IMPORT CONST double Sleef_cinz_exp10d1_u35purec(double); +IMPORT CONST double Sleef_expm1d1_u10purec(double); +IMPORT CONST double Sleef_cinz_expm1d1_u10purec(double); +IMPORT CONST double Sleef_log10d1_u10purec(double); +IMPORT CONST double Sleef_cinz_log10d1_u10purec(double); +IMPORT CONST double Sleef_log2d1_u10purec(double); +IMPORT CONST double Sleef_cinz_log2d1_u10purec(double); +IMPORT CONST double Sleef_log2d1_u35purec(double); +IMPORT CONST double Sleef_cinz_log2d1_u35purec(double); +IMPORT CONST double Sleef_log1pd1_u10purec(double); +IMPORT CONST double Sleef_cinz_log1pd1_u10purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u05purec(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u35purec(double); +IMPORT CONST double Sleef_sinpid1_u05purec(double); +IMPORT CONST double Sleef_cinz_sinpid1_u05purec(double); +IMPORT CONST double Sleef_cospid1_u05purec(double); +IMPORT CONST double Sleef_cinz_cospid1_u05purec(double); +IMPORT CONST double Sleef_ldexpd1_purec(double, int32_t); +IMPORT CONST double Sleef_cinz_ldexpd1_purec(double, int32_t); +IMPORT CONST int32_t Sleef_ilogbd1_purec(double); +IMPORT CONST int32_t Sleef_cinz_ilogbd1_purec(double); +IMPORT CONST double Sleef_fmad1_purec(double, double, double); +IMPORT CONST double Sleef_cinz_fmad1_purec(double, double, double); +IMPORT CONST double Sleef_sqrtd1_purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_purec(double); +IMPORT CONST double Sleef_sqrtd1_u05purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_u05purec(double); +IMPORT CONST double Sleef_sqrtd1_u35purec(double); +IMPORT CONST double Sleef_cinz_sqrtd1_u35purec(double); +IMPORT CONST double Sleef_hypotd1_u05purec(double, double); +IMPORT CONST double Sleef_cinz_hypotd1_u05purec(double, double); +IMPORT CONST double Sleef_hypotd1_u35purec(double, double); +IMPORT CONST double Sleef_cinz_hypotd1_u35purec(double, double); +IMPORT CONST double Sleef_fabsd1_purec(double); +IMPORT CONST double Sleef_cinz_fabsd1_purec(double); +IMPORT CONST double Sleef_copysignd1_purec(double, double); +IMPORT CONST double Sleef_cinz_copysignd1_purec(double, double); +IMPORT CONST double Sleef_fmaxd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmaxd1_purec(double, double); +IMPORT CONST double Sleef_fmind1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmind1_purec(double, double); +IMPORT CONST double Sleef_fdimd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fdimd1_purec(double, double); +IMPORT CONST double Sleef_truncd1_purec(double); +IMPORT CONST double Sleef_cinz_truncd1_purec(double); +IMPORT CONST double Sleef_floord1_purec(double); +IMPORT CONST double Sleef_cinz_floord1_purec(double); +IMPORT CONST double Sleef_ceild1_purec(double); +IMPORT CONST double Sleef_cinz_ceild1_purec(double); +IMPORT CONST double Sleef_roundd1_purec(double); +IMPORT CONST double 
Sleef_cinz_roundd1_purec(double); +IMPORT CONST double Sleef_rintd1_purec(double); +IMPORT CONST double Sleef_cinz_rintd1_purec(double); +IMPORT CONST double Sleef_nextafterd1_purec(double, double); +IMPORT CONST double Sleef_cinz_nextafterd1_purec(double, double); +IMPORT CONST double Sleef_frfrexpd1_purec(double); +IMPORT CONST double Sleef_cinz_frfrexpd1_purec(double); +IMPORT CONST int32_t Sleef_expfrexpd1_purec(double); +IMPORT CONST int32_t Sleef_cinz_expfrexpd1_purec(double); +IMPORT CONST double Sleef_fmodd1_purec(double, double); +IMPORT CONST double Sleef_cinz_fmodd1_purec(double, double); +IMPORT CONST double Sleef_remainderd1_purec(double, double); +IMPORT CONST double Sleef_cinz_remainderd1_purec(double, double); +IMPORT CONST Sleef_double_2 Sleef_modfd1_purec(double); +IMPORT CONST Sleef_double_2 Sleef_cinz_modfd1_purec(double); +IMPORT CONST double Sleef_lgammad1_u10purec(double); +IMPORT CONST double Sleef_cinz_lgammad1_u10purec(double); +IMPORT CONST double Sleef_tgammad1_u10purec(double); +IMPORT CONST double Sleef_cinz_tgammad1_u10purec(double); +IMPORT CONST double Sleef_erfd1_u10purec(double); +IMPORT CONST double Sleef_cinz_erfd1_u10purec(double); +IMPORT CONST double Sleef_erfcd1_u15purec(double); +IMPORT CONST double Sleef_cinz_erfcd1_u15purec(double); +IMPORT CONST int Sleef_getIntd1_purec(int); +IMPORT CONST void *Sleef_getPtrd1_purec(int); + +#ifndef Sleef_float_2_DEFINED +typedef struct { + float x, y; +} Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +IMPORT CONST float Sleef_sinf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sinf1_u35purec(float); +IMPORT CONST float Sleef_cosf1_u35purec(float); +IMPORT CONST float Sleef_cinz_cosf1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u35purec(float); +IMPORT CONST float Sleef_tanf1_u35purec(float); +IMPORT CONST float Sleef_cinz_tanf1_u35purec(float); +IMPORT CONST float Sleef_asinf1_u35purec(float); +IMPORT CONST float Sleef_cinz_asinf1_u35purec(float); +IMPORT CONST float Sleef_acosf1_u35purec(float); +IMPORT CONST float Sleef_cinz_acosf1_u35purec(float); +IMPORT CONST float Sleef_atanf1_u35purec(float); +IMPORT CONST float Sleef_cinz_atanf1_u35purec(float); +IMPORT CONST float Sleef_atan2f1_u35purec(float, float); +IMPORT CONST float Sleef_cinz_atan2f1_u35purec(float, float); +IMPORT CONST float Sleef_logf1_u35purec(float); +IMPORT CONST float Sleef_cinz_logf1_u35purec(float); +IMPORT CONST float Sleef_cbrtf1_u35purec(float); +IMPORT CONST float Sleef_cinz_cbrtf1_u35purec(float); +IMPORT CONST float Sleef_sinf1_u10purec(float); +IMPORT CONST float Sleef_cinz_sinf1_u10purec(float); +IMPORT CONST float Sleef_cosf1_u10purec(float); +IMPORT CONST float Sleef_cinz_cosf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u10purec(float); +IMPORT CONST float Sleef_tanf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tanf1_u10purec(float); +IMPORT CONST float Sleef_asinf1_u10purec(float); +IMPORT CONST float Sleef_cinz_asinf1_u10purec(float); +IMPORT CONST float Sleef_acosf1_u10purec(float); +IMPORT CONST float Sleef_cinz_acosf1_u10purec(float); +IMPORT CONST float Sleef_atanf1_u10purec(float); +IMPORT CONST float Sleef_cinz_atanf1_u10purec(float); +IMPORT CONST float Sleef_atan2f1_u10purec(float, float); +IMPORT CONST float Sleef_cinz_atan2f1_u10purec(float, float); +IMPORT CONST float Sleef_logf1_u10purec(float); +IMPORT CONST float 
Sleef_cinz_logf1_u10purec(float); +IMPORT CONST float Sleef_cbrtf1_u10purec(float); +IMPORT CONST float Sleef_cinz_cbrtf1_u10purec(float); +IMPORT CONST float Sleef_expf1_u10purec(float); +IMPORT CONST float Sleef_cinz_expf1_u10purec(float); +IMPORT CONST float Sleef_powf1_u10purec(float, float); +IMPORT CONST float Sleef_cinz_powf1_u10purec(float, float); +IMPORT CONST float Sleef_sinhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_sinhf1_u10purec(float); +IMPORT CONST float Sleef_coshf1_u10purec(float); +IMPORT CONST float Sleef_cinz_coshf1_u10purec(float); +IMPORT CONST float Sleef_tanhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tanhf1_u10purec(float); +IMPORT CONST float Sleef_sinhf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sinhf1_u35purec(float); +IMPORT CONST float Sleef_coshf1_u35purec(float); +IMPORT CONST float Sleef_cinz_coshf1_u35purec(float); +IMPORT CONST float Sleef_tanhf1_u35purec(float); +IMPORT CONST float Sleef_cinz_tanhf1_u35purec(float); +IMPORT CONST float Sleef_fastsinf1_u3500purec(float); +IMPORT CONST float Sleef_cinz_fastsinf1_u3500purec(float); +IMPORT CONST float Sleef_fastcosf1_u3500purec(float); +IMPORT CONST float Sleef_cinz_fastcosf1_u3500purec(float); +IMPORT CONST float Sleef_fastpowf1_u3500purec(float, float); +IMPORT CONST float Sleef_cinz_fastpowf1_u3500purec(float, float); +IMPORT CONST float Sleef_asinhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_asinhf1_u10purec(float); +IMPORT CONST float Sleef_acoshf1_u10purec(float); +IMPORT CONST float Sleef_cinz_acoshf1_u10purec(float); +IMPORT CONST float Sleef_atanhf1_u10purec(float); +IMPORT CONST float Sleef_cinz_atanhf1_u10purec(float); +IMPORT CONST float Sleef_exp2f1_u10purec(float); +IMPORT CONST float Sleef_cinz_exp2f1_u10purec(float); +IMPORT CONST float Sleef_exp2f1_u35purec(float); +IMPORT CONST float Sleef_cinz_exp2f1_u35purec(float); +IMPORT CONST float Sleef_exp10f1_u10purec(float); +IMPORT CONST float Sleef_cinz_exp10f1_u10purec(float); +IMPORT CONST float Sleef_exp10f1_u35purec(float); +IMPORT CONST float Sleef_cinz_exp10f1_u35purec(float); +IMPORT CONST float Sleef_expm1f1_u10purec(float); +IMPORT CONST float Sleef_cinz_expm1f1_u10purec(float); +IMPORT CONST float Sleef_log10f1_u10purec(float); +IMPORT CONST float Sleef_cinz_log10f1_u10purec(float); +IMPORT CONST float Sleef_log2f1_u10purec(float); +IMPORT CONST float Sleef_cinz_log2f1_u10purec(float); +IMPORT CONST float Sleef_log2f1_u35purec(float); +IMPORT CONST float Sleef_cinz_log2f1_u35purec(float); +IMPORT CONST float Sleef_log1pf1_u10purec(float); +IMPORT CONST float Sleef_cinz_log1pf1_u10purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u05purec(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u35purec(float); +IMPORT CONST float Sleef_sinpif1_u05purec(float); +IMPORT CONST float Sleef_cinz_sinpif1_u05purec(float); +IMPORT CONST float Sleef_cospif1_u05purec(float); +IMPORT CONST float Sleef_cinz_cospif1_u05purec(float); +IMPORT CONST float Sleef_fmaf1_purec(float, float, float); +IMPORT CONST float Sleef_cinz_fmaf1_purec(float, float, float); +IMPORT CONST float Sleef_sqrtf1_purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_purec(float); +IMPORT CONST float Sleef_sqrtf1_u05purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_u05purec(float); +IMPORT CONST float Sleef_sqrtf1_u35purec(float); +IMPORT CONST float Sleef_cinz_sqrtf1_u35purec(float); +IMPORT CONST float 
Sleef_hypotf1_u05purec(float, float); +IMPORT CONST float Sleef_cinz_hypotf1_u05purec(float, float); +IMPORT CONST float Sleef_hypotf1_u35purec(float, float); +IMPORT CONST float Sleef_cinz_hypotf1_u35purec(float, float); +IMPORT CONST float Sleef_fabsf1_purec(float); +IMPORT CONST float Sleef_cinz_fabsf1_purec(float); +IMPORT CONST float Sleef_copysignf1_purec(float, float); +IMPORT CONST float Sleef_cinz_copysignf1_purec(float, float); +IMPORT CONST float Sleef_fmaxf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fmaxf1_purec(float, float); +IMPORT CONST float Sleef_fminf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fminf1_purec(float, float); +IMPORT CONST float Sleef_fdimf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fdimf1_purec(float, float); +IMPORT CONST float Sleef_truncf1_purec(float); +IMPORT CONST float Sleef_cinz_truncf1_purec(float); +IMPORT CONST float Sleef_floorf1_purec(float); +IMPORT CONST float Sleef_cinz_floorf1_purec(float); +IMPORT CONST float Sleef_ceilf1_purec(float); +IMPORT CONST float Sleef_cinz_ceilf1_purec(float); +IMPORT CONST float Sleef_roundf1_purec(float); +IMPORT CONST float Sleef_cinz_roundf1_purec(float); +IMPORT CONST float Sleef_rintf1_purec(float); +IMPORT CONST float Sleef_cinz_rintf1_purec(float); +IMPORT CONST float Sleef_nextafterf1_purec(float, float); +IMPORT CONST float Sleef_cinz_nextafterf1_purec(float, float); +IMPORT CONST float Sleef_frfrexpf1_purec(float); +IMPORT CONST float Sleef_cinz_frfrexpf1_purec(float); +IMPORT CONST float Sleef_fmodf1_purec(float, float); +IMPORT CONST float Sleef_cinz_fmodf1_purec(float, float); +IMPORT CONST float Sleef_remainderf1_purec(float, float); +IMPORT CONST float Sleef_cinz_remainderf1_purec(float, float); +IMPORT CONST Sleef_float_2 Sleef_modff1_purec(float); +IMPORT CONST Sleef_float_2 Sleef_cinz_modff1_purec(float); +IMPORT CONST float Sleef_lgammaf1_u10purec(float); +IMPORT CONST float Sleef_cinz_lgammaf1_u10purec(float); +IMPORT CONST float Sleef_tgammaf1_u10purec(float); +IMPORT CONST float Sleef_cinz_tgammaf1_u10purec(float); +IMPORT CONST float Sleef_erff1_u10purec(float); +IMPORT CONST float Sleef_cinz_erff1_u10purec(float); +IMPORT CONST float Sleef_erfcf1_u15purec(float); +IMPORT CONST float Sleef_cinz_erfcf1_u15purec(float); +IMPORT CONST int Sleef_getIntf1_purec(int); +IMPORT CONST int Sleef_cinz_getIntf1_purec(int); +IMPORT CONST void *Sleef_getPtrf1_purec(int); +IMPORT CONST void *Sleef_cinz_getPtrf1_purec(int); +#endif +#ifdef FP_FAST_FMA + +#ifndef Sleef_double_2_DEFINED +typedef struct { + double x, y; +} Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +IMPORT CONST double Sleef_sind1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sind1_u35purecfma(double); +IMPORT CONST double Sleef_cosd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_cosd1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u35purecfma(double); +IMPORT CONST double Sleef_tand1_u35purecfma(double); +IMPORT CONST double Sleef_finz_tand1_u35purecfma(double); +IMPORT CONST double Sleef_asind1_u35purecfma(double); +IMPORT CONST double Sleef_finz_asind1_u35purecfma(double); +IMPORT CONST double Sleef_acosd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_acosd1_u35purecfma(double); +IMPORT CONST double Sleef_atand1_u35purecfma(double); +IMPORT CONST double Sleef_finz_atand1_u35purecfma(double); +IMPORT CONST double Sleef_atan2d1_u35purecfma(double, double); +IMPORT CONST double 
Sleef_finz_atan2d1_u35purecfma(double, double); +IMPORT CONST double Sleef_logd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_logd1_u35purecfma(double); +IMPORT CONST double Sleef_cbrtd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_cbrtd1_u35purecfma(double); +IMPORT CONST double Sleef_sind1_u10purecfma(double); +IMPORT CONST double Sleef_finz_sind1_u10purecfma(double); +IMPORT CONST double Sleef_cosd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_cosd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u10purecfma(double); +IMPORT CONST double Sleef_tand1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tand1_u10purecfma(double); +IMPORT CONST double Sleef_asind1_u10purecfma(double); +IMPORT CONST double Sleef_finz_asind1_u10purecfma(double); +IMPORT CONST double Sleef_acosd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_acosd1_u10purecfma(double); +IMPORT CONST double Sleef_atand1_u10purecfma(double); +IMPORT CONST double Sleef_finz_atand1_u10purecfma(double); +IMPORT CONST double Sleef_atan2d1_u10purecfma(double, double); +IMPORT CONST double Sleef_finz_atan2d1_u10purecfma(double, double); +IMPORT CONST double Sleef_logd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_logd1_u10purecfma(double); +IMPORT CONST double Sleef_cbrtd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_cbrtd1_u10purecfma(double); +IMPORT CONST double Sleef_expd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_expd1_u10purecfma(double); +IMPORT CONST double Sleef_powd1_u10purecfma(double, double); +IMPORT CONST double Sleef_finz_powd1_u10purecfma(double, double); +IMPORT CONST double Sleef_sinhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_sinhd1_u10purecfma(double); +IMPORT CONST double Sleef_coshd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_coshd1_u10purecfma(double); +IMPORT CONST double Sleef_tanhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tanhd1_u10purecfma(double); +IMPORT CONST double Sleef_sinhd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sinhd1_u35purecfma(double); +IMPORT CONST double Sleef_coshd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_coshd1_u35purecfma(double); +IMPORT CONST double Sleef_tanhd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_tanhd1_u35purecfma(double); +IMPORT CONST double Sleef_fastsind1_u3500purecfma(double); +IMPORT CONST double Sleef_finz_fastsind1_u3500purecfma(double); +IMPORT CONST double Sleef_fastcosd1_u3500purecfma(double); +IMPORT CONST double Sleef_finz_fastcosd1_u3500purecfma(double); +IMPORT CONST double Sleef_fastpowd1_u3500purecfma(double, double); +IMPORT CONST double Sleef_finz_fastpowd1_u3500purecfma(double, double); +IMPORT CONST double Sleef_asinhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_asinhd1_u10purecfma(double); +IMPORT CONST double Sleef_acoshd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_acoshd1_u10purecfma(double); +IMPORT CONST double Sleef_atanhd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_atanhd1_u10purecfma(double); +IMPORT CONST double Sleef_exp2d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_exp2d1_u10purecfma(double); +IMPORT CONST double Sleef_exp2d1_u35purecfma(double); +IMPORT CONST double Sleef_finz_exp2d1_u35purecfma(double); +IMPORT CONST double Sleef_exp10d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_exp10d1_u10purecfma(double); +IMPORT CONST double Sleef_exp10d1_u35purecfma(double); +IMPORT CONST double 
Sleef_finz_exp10d1_u35purecfma(double); +IMPORT CONST double Sleef_expm1d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_expm1d1_u10purecfma(double); +IMPORT CONST double Sleef_log10d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log10d1_u10purecfma(double); +IMPORT CONST double Sleef_log2d1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log2d1_u10purecfma(double); +IMPORT CONST double Sleef_log2d1_u35purecfma(double); +IMPORT CONST double Sleef_finz_log2d1_u35purecfma(double); +IMPORT CONST double Sleef_log1pd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_log1pd1_u10purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u05purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u35purecfma(double); +IMPORT CONST double Sleef_sinpid1_u05purecfma(double); +IMPORT CONST double Sleef_finz_sinpid1_u05purecfma(double); +IMPORT CONST double Sleef_cospid1_u05purecfma(double); +IMPORT CONST double Sleef_finz_cospid1_u05purecfma(double); +IMPORT CONST double Sleef_ldexpd1_purecfma(double, int32_t); +IMPORT CONST double Sleef_finz_ldexpd1_purecfma(double, int32_t); +IMPORT CONST int32_t Sleef_ilogbd1_purecfma(double); +IMPORT CONST int32_t Sleef_finz_ilogbd1_purecfma(double); +IMPORT CONST double Sleef_fmad1_purecfma(double, double, double); +IMPORT CONST double Sleef_finz_fmad1_purecfma(double, double, double); +IMPORT CONST double Sleef_sqrtd1_purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_purecfma(double); +IMPORT CONST double Sleef_sqrtd1_u05purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_u05purecfma(double); +IMPORT CONST double Sleef_sqrtd1_u35purecfma(double); +IMPORT CONST double Sleef_finz_sqrtd1_u35purecfma(double); +IMPORT CONST double Sleef_hypotd1_u05purecfma(double, double); +IMPORT CONST double Sleef_finz_hypotd1_u05purecfma(double, double); +IMPORT CONST double Sleef_hypotd1_u35purecfma(double, double); +IMPORT CONST double Sleef_finz_hypotd1_u35purecfma(double, double); +IMPORT CONST double Sleef_fabsd1_purecfma(double); +IMPORT CONST double Sleef_finz_fabsd1_purecfma(double); +IMPORT CONST double Sleef_copysignd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_copysignd1_purecfma(double, double); +IMPORT CONST double Sleef_fmaxd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmaxd1_purecfma(double, double); +IMPORT CONST double Sleef_fmind1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmind1_purecfma(double, double); +IMPORT CONST double Sleef_fdimd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fdimd1_purecfma(double, double); +IMPORT CONST double Sleef_truncd1_purecfma(double); +IMPORT CONST double Sleef_finz_truncd1_purecfma(double); +IMPORT CONST double Sleef_floord1_purecfma(double); +IMPORT CONST double Sleef_finz_floord1_purecfma(double); +IMPORT CONST double Sleef_ceild1_purecfma(double); +IMPORT CONST double Sleef_finz_ceild1_purecfma(double); +IMPORT CONST double Sleef_roundd1_purecfma(double); +IMPORT CONST double Sleef_finz_roundd1_purecfma(double); +IMPORT CONST double Sleef_rintd1_purecfma(double); +IMPORT CONST double Sleef_finz_rintd1_purecfma(double); +IMPORT CONST double Sleef_nextafterd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_nextafterd1_purecfma(double, double); +IMPORT CONST double Sleef_frfrexpd1_purecfma(double); +IMPORT CONST double Sleef_finz_frfrexpd1_purecfma(double); +IMPORT CONST 
int32_t Sleef_expfrexpd1_purecfma(double); +IMPORT CONST int32_t Sleef_finz_expfrexpd1_purecfma(double); +IMPORT CONST double Sleef_fmodd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_fmodd1_purecfma(double, double); +IMPORT CONST double Sleef_remainderd1_purecfma(double, double); +IMPORT CONST double Sleef_finz_remainderd1_purecfma(double, double); +IMPORT CONST Sleef_double_2 Sleef_modfd1_purecfma(double); +IMPORT CONST Sleef_double_2 Sleef_finz_modfd1_purecfma(double); +IMPORT CONST double Sleef_lgammad1_u10purecfma(double); +IMPORT CONST double Sleef_finz_lgammad1_u10purecfma(double); +IMPORT CONST double Sleef_tgammad1_u10purecfma(double); +IMPORT CONST double Sleef_finz_tgammad1_u10purecfma(double); +IMPORT CONST double Sleef_erfd1_u10purecfma(double); +IMPORT CONST double Sleef_finz_erfd1_u10purecfma(double); +IMPORT CONST double Sleef_erfcd1_u15purecfma(double); +IMPORT CONST double Sleef_finz_erfcd1_u15purecfma(double); +IMPORT CONST int Sleef_getIntd1_purecfma(int); +IMPORT CONST void *Sleef_getPtrd1_purecfma(int); + +#ifndef Sleef_float_2_DEFINED +typedef struct { + float x, y; +} Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +IMPORT CONST float Sleef_sinf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sinf1_u35purecfma(float); +IMPORT CONST float Sleef_cosf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_cosf1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u35purecfma(float); +IMPORT CONST float Sleef_tanf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_tanf1_u35purecfma(float); +IMPORT CONST float Sleef_asinf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_asinf1_u35purecfma(float); +IMPORT CONST float Sleef_acosf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_acosf1_u35purecfma(float); +IMPORT CONST float Sleef_atanf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_atanf1_u35purecfma(float); +IMPORT CONST float Sleef_atan2f1_u35purecfma(float, float); +IMPORT CONST float Sleef_finz_atan2f1_u35purecfma(float, float); +IMPORT CONST float Sleef_logf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_logf1_u35purecfma(float); +IMPORT CONST float Sleef_cbrtf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_cbrtf1_u35purecfma(float); +IMPORT CONST float Sleef_sinf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_sinf1_u10purecfma(float); +IMPORT CONST float Sleef_cosf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_cosf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u10purecfma(float); +IMPORT CONST float Sleef_tanf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tanf1_u10purecfma(float); +IMPORT CONST float Sleef_asinf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_asinf1_u10purecfma(float); +IMPORT CONST float Sleef_acosf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_acosf1_u10purecfma(float); +IMPORT CONST float Sleef_atanf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_atanf1_u10purecfma(float); +IMPORT CONST float Sleef_atan2f1_u10purecfma(float, float); +IMPORT CONST float Sleef_finz_atan2f1_u10purecfma(float, float); +IMPORT CONST float Sleef_logf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_logf1_u10purecfma(float); +IMPORT CONST float Sleef_cbrtf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_cbrtf1_u10purecfma(float); +IMPORT CONST float Sleef_expf1_u10purecfma(float); +IMPORT CONST float 
Sleef_finz_expf1_u10purecfma(float); +IMPORT CONST float Sleef_powf1_u10purecfma(float, float); +IMPORT CONST float Sleef_finz_powf1_u10purecfma(float, float); +IMPORT CONST float Sleef_sinhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_sinhf1_u10purecfma(float); +IMPORT CONST float Sleef_coshf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_coshf1_u10purecfma(float); +IMPORT CONST float Sleef_tanhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tanhf1_u10purecfma(float); +IMPORT CONST float Sleef_sinhf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sinhf1_u35purecfma(float); +IMPORT CONST float Sleef_coshf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_coshf1_u35purecfma(float); +IMPORT CONST float Sleef_tanhf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_tanhf1_u35purecfma(float); +IMPORT CONST float Sleef_fastsinf1_u3500purecfma(float); +IMPORT CONST float Sleef_finz_fastsinf1_u3500purecfma(float); +IMPORT CONST float Sleef_fastcosf1_u3500purecfma(float); +IMPORT CONST float Sleef_finz_fastcosf1_u3500purecfma(float); +IMPORT CONST float Sleef_fastpowf1_u3500purecfma(float, float); +IMPORT CONST float Sleef_finz_fastpowf1_u3500purecfma(float, float); +IMPORT CONST float Sleef_asinhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_asinhf1_u10purecfma(float); +IMPORT CONST float Sleef_acoshf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_acoshf1_u10purecfma(float); +IMPORT CONST float Sleef_atanhf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_atanhf1_u10purecfma(float); +IMPORT CONST float Sleef_exp2f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_exp2f1_u10purecfma(float); +IMPORT CONST float Sleef_exp2f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_exp2f1_u35purecfma(float); +IMPORT CONST float Sleef_exp10f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_exp10f1_u10purecfma(float); +IMPORT CONST float Sleef_exp10f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_exp10f1_u35purecfma(float); +IMPORT CONST float Sleef_expm1f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_expm1f1_u10purecfma(float); +IMPORT CONST float Sleef_log10f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log10f1_u10purecfma(float); +IMPORT CONST float Sleef_log2f1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log2f1_u10purecfma(float); +IMPORT CONST float Sleef_log2f1_u35purecfma(float); +IMPORT CONST float Sleef_finz_log2f1_u35purecfma(float); +IMPORT CONST float Sleef_log1pf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_log1pf1_u10purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u05purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u35purecfma(float); +IMPORT CONST float Sleef_sinpif1_u05purecfma(float); +IMPORT CONST float Sleef_finz_sinpif1_u05purecfma(float); +IMPORT CONST float Sleef_cospif1_u05purecfma(float); +IMPORT CONST float Sleef_finz_cospif1_u05purecfma(float); +IMPORT CONST float Sleef_fmaf1_purecfma(float, float, float); +IMPORT CONST float Sleef_finz_fmaf1_purecfma(float, float, float); +IMPORT CONST float Sleef_sqrtf1_purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_purecfma(float); +IMPORT CONST float Sleef_sqrtf1_u05purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_u05purecfma(float); +IMPORT CONST float Sleef_sqrtf1_u35purecfma(float); +IMPORT CONST float Sleef_finz_sqrtf1_u35purecfma(float); +IMPORT CONST float 
Sleef_hypotf1_u05purecfma(float, float); +IMPORT CONST float Sleef_finz_hypotf1_u05purecfma(float, float); +IMPORT CONST float Sleef_hypotf1_u35purecfma(float, float); +IMPORT CONST float Sleef_finz_hypotf1_u35purecfma(float, float); +IMPORT CONST float Sleef_fabsf1_purecfma(float); +IMPORT CONST float Sleef_finz_fabsf1_purecfma(float); +IMPORT CONST float Sleef_copysignf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_copysignf1_purecfma(float, float); +IMPORT CONST float Sleef_fmaxf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fmaxf1_purecfma(float, float); +IMPORT CONST float Sleef_fminf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fminf1_purecfma(float, float); +IMPORT CONST float Sleef_fdimf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fdimf1_purecfma(float, float); +IMPORT CONST float Sleef_truncf1_purecfma(float); +IMPORT CONST float Sleef_finz_truncf1_purecfma(float); +IMPORT CONST float Sleef_floorf1_purecfma(float); +IMPORT CONST float Sleef_finz_floorf1_purecfma(float); +IMPORT CONST float Sleef_ceilf1_purecfma(float); +IMPORT CONST float Sleef_finz_ceilf1_purecfma(float); +IMPORT CONST float Sleef_roundf1_purecfma(float); +IMPORT CONST float Sleef_finz_roundf1_purecfma(float); +IMPORT CONST float Sleef_rintf1_purecfma(float); +IMPORT CONST float Sleef_finz_rintf1_purecfma(float); +IMPORT CONST float Sleef_nextafterf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_nextafterf1_purecfma(float, float); +IMPORT CONST float Sleef_frfrexpf1_purecfma(float); +IMPORT CONST float Sleef_finz_frfrexpf1_purecfma(float); +IMPORT CONST float Sleef_fmodf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_fmodf1_purecfma(float, float); +IMPORT CONST float Sleef_remainderf1_purecfma(float, float); +IMPORT CONST float Sleef_finz_remainderf1_purecfma(float, float); +IMPORT CONST Sleef_float_2 Sleef_modff1_purecfma(float); +IMPORT CONST Sleef_float_2 Sleef_finz_modff1_purecfma(float); +IMPORT CONST float Sleef_lgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_lgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_tgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_finz_tgammaf1_u10purecfma(float); +IMPORT CONST float Sleef_erff1_u10purecfma(float); +IMPORT CONST float Sleef_finz_erff1_u10purecfma(float); +IMPORT CONST float Sleef_erfcf1_u15purecfma(float); +IMPORT CONST float Sleef_finz_erfcf1_u15purecfma(float); +IMPORT CONST int Sleef_getIntf1_purecfma(int); +IMPORT CONST int Sleef_finz_getIntf1_purecfma(int); +IMPORT CONST void *Sleef_getPtrf1_purecfma(int); +IMPORT CONST void *Sleef_finz_getPtrf1_purecfma(int); +#endif +#ifdef __cplusplus +} +#endif + +#undef IMPORT +#endif // #ifndef __SLEEF_H__ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h new file mode 100644 index 0000000000000000000000000000000000000000..f4eb3afee1e5bde5e4bc6e1d0d7b767d744c0d2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h @@ -0,0 +1,143 @@ +#pragma once +#ifdef USE_CUDA +#include +#include +#include +#include +#include +#include +#include +namespace torch { + +TORCH_CUDA_CU_API bool CudaIPCCollect(); + +struct CudaIPCReceivedData final { + CudaIPCReceivedData() = default; + explicit CudaIPCReceivedData(std::shared_ptr shared_ptr) + : shared_ptr_(std::move(shared_ptr)) {} + std::shared_ptr shared_ptr_; +}; + +struct CudaIPCSentData final { + std::string handle_; + uint64_t 
offset_; + uint64_t* counter_ptr_; // Reference counter shared memory block + at::DataPtr original_ptr_; // Original mem allocation + cudaEvent_t event_; // Sync cuEventDestroy + bool event_sync_required_; + at::Device device_; + + CudaIPCSentData( + std::string handle, + uint64_t offset, + uint64_t* counter_ptr, + at::Device device); + ~CudaIPCSentData(); + + uint64_t counter_value(); + std::string handle() { + return handle_; + } + uint64_t offset() { + return offset_; + } + void set_original_ptr(at::DataPtr data_ptr) { + original_ptr_ = std::move(data_ptr); + } +}; + +TORCH_CUDA_CU_API at::DataPtr GetNewRefCountedSentData( + void* data, + at::Device device); + +namespace { + +inline constexpr int64_t CUDA_IPC_REF_COUNTER_FILE_SIZE = 10000; +inline constexpr int64_t CUDA_IPC_WARN_AFTER_X_BLOCKS_IN_LIMBO = 1000; +// It was determined empirically that CUDA (v10.1 and below) has a limit on the +// number of recorded blocking interprocess events, around ~22,000. To give us +// leeway, we picked 1000, which still leaves enough events to share tensors +// effectively. +inline constexpr int64_t CUDA_IPC_MAXIMUM_EVENTS_TO_USE = 1000; + +// All data blocks pending deletion that still have a non-zero reference counter go here +struct CudaIPCSentDataLimbo final { + ~CudaIPCSentDataLimbo(); + bool collect(); + void add(std::unique_ptr<CudaIPCSentData> shared_block); + uint64_t size(); + + private: + // TODO: Can be changed to FIFO in order to avoid a full traversal on every + // collect() + std::vector<std::unique_ptr<CudaIPCSentData>> shared_blocks_; + std::mutex limbo_mutex_; +}; + +struct CudaIPCRefCountersFile final { + CudaIPCRefCountersFile( + std::string handle, + uint64_t size, + at::DataPtr data_ptr) + : size_(size), + + handle_(std::move(handle)), + refcounted_shared_mem_(std::move(data_ptr)) {} + + uint64_t* counter_ptr() { + return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_; + } + + void set_counter(uint64_t value) { + *counter_ptr() = value; + } + + bool have_offsets() { + return next_offset_ < size_; + } + + bool offsets_in_use() { + return used_slots_; + } + + uint64_t get_offset() { + return next_offset_; + } + + void rotate_offset() { + next_offset_++; + used_slots_++; + } + + void return_offset(uint64_t offset /* unused */) { + used_slots_--; + } + + std::string handle() { + return handle_; + } + + private: + uint64_t next_offset_{0}; + uint64_t size_; + uint64_t used_slots_{0}; + std::string handle_; + at::DataPtr refcounted_shared_mem_; +}; + +} // namespace +} // namespace torch + +namespace c10 { +namespace { +class CudaIPCCollectCallback : public FreeMemoryCallback { + public: + bool Execute() override { + return torch::CudaIPCCollect(); + } +}; +} // namespace + +} // namespace c10 + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h new file mode 100644 index 0000000000000000000000000000000000000000..3142eb97a000173c776c1c4604665c51a7ba20cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +constexpr int DTYPE_NAME_LEN = 64; + +struct TORCH_API THPDtype { + PyObject_HEAD at::ScalarType scalar_type; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[DTYPE_NAME_LEN + 1]; +}; + +TORCH_API extern PyTypeObject THPDtypeType; + +inline bool THPDtype_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPDtypeType; +} + +inline bool
THPPythonScalarType_Check(PyObject* obj) { + return obj == (PyObject*)(&PyFloat_Type) || + obj == (PyObject*)(&PyBool_Type) || obj == (PyObject*)(&PyLong_Type); +} + +TORCH_API PyObject* THPDtype_New( + at::ScalarType scalar_type, + const std::string& name); + +void THPDtype_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h new file mode 100644 index 0000000000000000000000000000000000000000..265582e0ddfaea8f997dc15ccc20fa2800db54b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include + +#include + +const int LAYOUT_NAME_LEN = 64; + +struct THPLayout { + PyObject_HEAD at::Layout layout; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[LAYOUT_NAME_LEN + 1]; +}; + +extern PyTypeObject THPLayoutType; + +inline bool THPLayout_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPLayoutType; +} + +PyObject* THPLayout_New(at::Layout layout, const std::string& name); + +void THPLayout_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..30809ff10be90e2d091002ce4c2abb8e731b8d0b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h @@ -0,0 +1,7 @@ +#pragma once + +#include +#include + +TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter(); +TORCH_PYTHON_API bool isMainPyInterpreter(); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h new file mode 100644 index 0000000000000000000000000000000000000000..fcb75304c0ed0bf885a058e6f08d0cc8fe23ec3b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +#include + +#include + +constexpr int QSCHEME_NAME_LEN = 64; + +struct THPQScheme { + PyObject_HEAD at::QScheme qscheme; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + char name[QSCHEME_NAME_LEN + 1]; +}; + +extern PyTypeObject THPQSchemeType; + +inline bool THPQScheme_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPQSchemeType; +} + +PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name); + +void THPQScheme_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h new file mode 100644 index 0000000000000000000000000000000000000000..803abf1832f000084c8e55cf147c51fbc511a0cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h @@ -0,0 +1,8 @@ +#ifndef THP_STORAGE_SHARING_INC +#define THP_STORAGE_SHARING_INC + +#include + +PyMethodDef* THPStorage_getSharingMethods(); + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..91f1abe0516ce5555a8460e6fca232bd518e8ad0 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h @@ -0,0 +1,23 @@ +#ifndef THP_STREAM_INC +#define THP_STREAM_INC + +#include +#include +#include + +struct THPStream { + PyObject_HEAD int64_t stream_id; + int64_t device_type; + int64_t device_index; +}; +extern TORCH_API PyTypeObject* THPStreamClass; + +void THPStream_init(PyObject* module); + +inline bool THPStream_Check(PyObject* obj) { + return THPStreamClass && PyObject_IsInstance(obj, (PyObject*)THPStreamClass); +} + +PyObject* THPStream_Wrap(const c10::Stream& stream); + +#endif // THP_STREAM_INC diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h new file mode 100644 index 0000000000000000000000000000000000000000..23512f1bce424865d82fc47f8af58740845b8ff4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h @@ -0,0 +1,19 @@ +#pragma once + +#define TH_CONCAT_STRING_2(x, y) TH_CONCAT_STRING_2_EXPAND(x, y) +#define TH_CONCAT_STRING_2_EXPAND(x, y) #x #y + +#define TH_CONCAT_STRING_3(x, y, z) TH_CONCAT_STRING_3_EXPAND(x, y, z) +#define TH_CONCAT_STRING_3_EXPAND(x, y, z) #x #y #z + +#define TH_CONCAT_STRING_4(x, y, z, w) TH_CONCAT_STRING_4_EXPAND(x, y, z, w) +#define TH_CONCAT_STRING_4_EXPAND(x, y, z, w) #x #y #z #w + +#define TH_CONCAT_2(x, y) TH_CONCAT_2_EXPAND(x, y) +#define TH_CONCAT_2_EXPAND(x, y) x##y + +#define TH_CONCAT_3(x, y, z) TH_CONCAT_3_EXPAND(x, y, z) +#define TH_CONCAT_3_EXPAND(x, y, z) x##y##z + +#define TH_CONCAT_4_EXPAND(x, y, z, w) x##y##z##w +#define TH_CONCAT_4(x, y, z, w) TH_CONCAT_4_EXPAND(x, y, z, w) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h new file mode 100644 index 0000000000000000000000000000000000000000..88d8489ba7b8f9dac36837cfa0816c6fd0375b4e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h @@ -0,0 +1,30 @@ +#ifndef THP_H +#define THP_H + +#include +#include + +// Back-compatibility macros, Thanks to http://cx-oracle.sourceforge.net/ +// define PyInt_* macros for Python 3.x. NB: We must include Python.h first, +// otherwise we'll incorrectly conclude PyInt_Check isn't defined! 
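// Editorial sketch (not part of the packaged header; the helper name `wrap_long` is hypothetical):
// the PyInt_* back-compat macros defined just below let C extension code written against the
// Python 2 int C-API keep compiling on Python 3, where each PyInt_* name expands to its
// PyLong_* equivalent.
//
//   #include <Python.h>
//
//   static PyObject* wrap_long(long value) {
//     // Under Python 3 this expands to PyLong_FromLong(value) via the macro defined below.
//     return PyInt_FromLong(value);
//   }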
+#ifndef PyInt_Check +#define PyInt_Check PyLong_Check +#define PyInt_FromLong PyLong_FromLong +#define PyInt_AsLong PyLong_AsLong +#define PyInt_Type PyLong_Type +#endif + +#include +#include +#include +#include +#include +#include +#include // This requires defined Storage and Tensor types +#include + +#include + +#include + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h new file mode 100644 index 0000000000000000000000000000000000000000..97d12e4eea5c6bbea483a1e2ebfd1a1ed7065411 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h @@ -0,0 +1,26 @@ +#pragma once + +#include + +#include + +struct THPDTypeInfo { + PyObject_HEAD at::ScalarType type; +}; + +struct THPFInfo : THPDTypeInfo {}; + +struct THPIInfo : THPDTypeInfo {}; + +extern PyTypeObject THPFInfoType; +extern PyTypeObject THPIInfoType; + +inline bool THPFInfo_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPFInfoType; +} + +inline bool THPIInfo_Check(PyObject* obj) { + return Py_TYPE(obj) == &THPIInfoType; +} + +void THPDTypeInfo_init(PyObject* module); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h new file mode 100644 index 0000000000000000000000000000000000000000..22d95b8669837120e30e7bd92f322207ef2c0166 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h @@ -0,0 +1,10 @@ +#pragma once + +#include +#include + +namespace torch::autograd { + +using InferenceMode = c10::InferenceMode; + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..e92e091e4c3e54c78abc35ed1ad2c6b2d9d14957 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h @@ -0,0 +1,445 @@ +#pragma once + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef _MSC_VER +#ifdef Type +#undef Type +#endif +#endif + +namespace torch { +namespace autograd { +enum class can_mutate_inplace_result { + success, + non_default_backward_view, + view_of_leaf, + is_leaf, +}; + +// The requires_grad argument is used to know if the inplace operation needs +// gradient to be setup for it. 
+// In particular, we can have tensor.requires_grad() != requires_grad when +// writing a Tensor that requires gradients inplace into a Tensor that does not +// require gradients: a = torch.rand(2) b = torch.rand(2, requires_grad=True) +// a.copy_(b) +inline can_mutate_inplace_result can_mutate_inplace( + const at::Tensor& tensor, + bool requires_grad) { + if (!requires_grad || !GradMode::is_enabled()) { + return can_mutate_inplace_result::success; + } + auto diff_view_meta = impl::get_view_autograd_meta(tensor); + if (diff_view_meta && diff_view_meta->has_bw_view()) { + if (diff_view_meta->get_creation_meta() != CreationMeta::DEFAULT) { + return can_mutate_inplace_result::non_default_backward_view; + } + if (tensor.requires_grad() && tensor._base().is_leaf()) { + return can_mutate_inplace_result::view_of_leaf; + } + } + if (tensor.requires_grad() && tensor.is_leaf()) { + return can_mutate_inplace_result::is_leaf; + } + return can_mutate_inplace_result::success; +} + +inline void check_inplace(const at::Tensor& tensor, bool requires_grad) { + switch (can_mutate_inplace(tensor, requires_grad)) { + case can_mutate_inplace_result::success: + return; + case can_mutate_inplace_result::non_default_backward_view: { + return handle_view_on_rebase(impl::get_view_autograd_meta(tensor)); + } + case can_mutate_inplace_result::view_of_leaf: + TORCH_CHECK( + false, + "a view of a leaf Variable that requires grad is being used in an in-place operation."); + break; + + case can_mutate_inplace_result::is_leaf: + TORCH_CHECK( + false, + "a leaf Variable that requires grad is being used in an in-place operation."); + break; + } + TORCH_INTERNAL_ASSERT(false); +} + +inline void check_inplace(at::ITensorListRef tensors, bool requires_grad) { + for (const auto& tensor : tensors) { + check_inplace(tensor, requires_grad); + } +} + +inline void throw_error_out_requires_grad(const char* name) { + AT_ERROR( + name, + "(): functions with out=... arguments don't support automatic differentiation, " + "but one of the arguments requires grad."); +} + +inline void throw_error_for_complex_autograd( + const at::Tensor& tensor, + const char* name) { + if (tensor.requires_grad()) { + TORCH_CHECK( + !tensor.is_complex(), + name, + " does not support automatic differentiation for outputs with complex dtype."); + } +} + +inline void throw_error_if_base_and_tensor_are_same( + const at::Tensor& base, + const at::Tensor& tensor) { + TORCH_CHECK( + base.unsafeGetTensorImpl() != tensor.unsafeGetTensorImpl(), + "View operation returned a tensor that is the same as the input base tensor. This " + "is no longer allowed; you must explicitly create a new tensor (e.g., using .detach()). 
" + "As a user, you could have made a mistake implementing __torch_dispatch__ or a Python " + "operator decomposition or meta registration; if that's not the case, please " + "report a bug to PyTorch or the backend you are using."); +} + +inline void throw_error_for_complex_autograd( + at::ITensorListRef tensorlist, + const char* name) { + for (const auto& tensor : tensorlist) { + throw_error_for_complex_autograd(tensor, name); + } +} + +// TODO: Blegh, bare references + +inline void rebase_history(const Variable& var, std::shared_ptr grad_fn) { + if (grad_fn && var.defined()) { + grad_fn->add_input_metadata(var); + impl::rebase_history(var, {std::move(grad_fn), 0}); + } +} + +inline void rebase_history( + const std::vector& vars, + const std::shared_ptr& grad_fn) { + if (grad_fn) { + for (auto& var : vars) { + if (var.defined()) { + auto output_nr = grad_fn->add_input_metadata(var); + impl::rebase_history(var, {grad_fn, output_nr}); + } else { + grad_fn->add_input_metadata(Node::undefined_input()); + } + } + } +} + +inline void increment_version(const at::Tensor& t) { + impl::bump_version(t); +} + +struct Flatten : IterArgs { + Flatten(variable_list& out) : out(out) {} + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + variable_list& out; + void operator()(const at::Tensor& x) { + out.emplace_back(x); + } + void operator()(const c10::optional& x) { + if (x.has_value()) + out.emplace_back(x.value()); + } + void operator()(at::ArrayRef xs) { + out.insert(out.end(), xs.begin(), xs.end()); + } +}; + +template +inline variable_list flatten_tensor_args(Args&&... args) { + variable_list out; + out.reserve(count_tensors(std::forward(args)...)); + Flatten(out).apply(std::forward(args)...); + return out; // RVO +} + +// See NOTE [ Autograd View Variables ] for details. +inline at::Tensor as_view( + const at::Tensor& base, + const at::Tensor& tensor, + bool is_bw_differentiable, + bool is_fw_differentiable, + std::unique_ptr view_func = nullptr, + std::function rev_view_func = nullptr, + CreationMeta creation_meta = CreationMeta::DEFAULT, + bool allow_tensor_metadata_change = true) { + // Note [View of inference tensor] + // For inference tensor this code can only be hit outside InferenceMode + // since ADInplaceOrView is in the default_included_set. + // If Inplace and View were separate dispatch keys we can just put Inplace + // in the default_included_set, so that view ops on inference tensor doesn't + // have to go through as_view even outside InferenceMode. + if (base.is_inference()) + return tensor; + + auto diff_view_meta = torch::autograd::impl::get_view_autograd_meta(base); + + // To speed up the most common case, we specially handle when both the forward + // and backward view infos are the same, and so a single shared ViewInfo can + // be used for both of them. 
+ if ((!diff_view_meta || diff_view_meta->shared_view_info()) && + is_bw_differentiable && is_fw_differentiable) { + throw_error_if_base_and_tensor_are_same(base, tensor); + if (diff_view_meta) { + creation_meta = propagate_creation_meta( + diff_view_meta->get_creation_meta(), creation_meta); + return make_variable_differentiable_view( + tensor, + diff_view_meta->get_backward_view().chain( + base, tensor, std::move(view_func), std::move(rev_view_func)), + c10::nullopt, + /*shared_view_info*/ true, + creation_meta, + allow_tensor_metadata_change); + } else { + return make_variable_differentiable_view( + tensor, + ViewInfo(base, std::move(view_func), std::move(rev_view_func)), + c10::nullopt, + /*shared_view_info*/ true, + creation_meta, + allow_tensor_metadata_change); + } + } + + // If they cannot be shared, create the required view infos + c10::optional new_bw_info; + c10::optional new_fw_info; + + if (is_bw_differentiable) { + auto bw_view_func = view_func ? view_func->clone_and_set() : nullptr; + if (diff_view_meta && diff_view_meta->has_bw_view()) { + const auto& base_bw_info = diff_view_meta->get_backward_view(); + new_bw_info = base_bw_info.chain( + base, tensor, std::move(bw_view_func), rev_view_func); + } else { + new_bw_info = ViewInfo(base, std::move(bw_view_func), rev_view_func); + } + } else { + TORCH_CHECK( + creation_meta == CreationMeta::DEFAULT, + "Non-backward differentiable views must have creation_meta=CreationMeta::DEFAULT"); + } + + if (is_fw_differentiable) { + // Check if base is a forward differentiable view + if (diff_view_meta && diff_view_meta->has_fw_view()) { + const auto& base_fw_info = diff_view_meta->get_forward_view(); + new_fw_info = base_fw_info.chain( + base, tensor, std::move(view_func), std::move(rev_view_func)); + } else { + new_fw_info = + ViewInfo(base, std::move(view_func), std::move(rev_view_func)); + } + } + + if (is_fw_differentiable || is_bw_differentiable) { + if (diff_view_meta && diff_view_meta->has_bw_view()) { + creation_meta = propagate_creation_meta( + diff_view_meta->get_creation_meta(), creation_meta); + } + throw_error_if_base_and_tensor_are_same(base, tensor); + return make_variable_differentiable_view( + tensor, + std::move(new_bw_info), + std::move(new_fw_info), + /*shared_view_info*/ false, + creation_meta, + allow_tensor_metadata_change); + } else { + return make_variable_non_differentiable_view( + base, tensor, allow_tensor_metadata_change); + } +} + +inline void check_no_requires_grad( + const at::Tensor& tensor, + const char* name, + const char* fn_name = "", + bool check_grad_mode = true) { + TORCH_CHECK( + !(tensor.defined() && tensor.requires_grad()) || + !(check_grad_mode && GradMode::is_enabled()), + "The function '", + fn_name, + "' is not differentiable with respect to argument '", + name, + "'. 
This input cannot have requires_grad True."); +} + +inline void check_no_requires_grad( + const c10::optional<at::Tensor>& tensor, + const char* name, + const char* fn_name = "") { + if (tensor.has_value()) { + check_no_requires_grad(*tensor, name, fn_name); + } +} + +inline void check_no_requires_grad( + at::ITensorListRef tensors, + const char* name, + const char* fn_name = "") { + // GradMode check is expensive, so check it only once for TensorLists + if (!GradMode::is_enabled()) { + return; + } + for (auto& tensor : tensors) { + check_no_requires_grad(tensor, name, fn_name, /*check_grad_mode*/ false); + } +} + +inline void check_no_requires_grad( + const c10::List<c10::optional<at::Tensor>>& tensors, + const char* name, + const char* fn_name = "") { + // GradMode check is expensive, so check it only once for TensorLists + if (!GradMode::is_enabled()) { + return; + } + for (c10::optional<at::Tensor> tensor : tensors) { + if (tensor.has_value()) { + check_no_requires_grad(*tensor, name, fn_name, /*check_grad_mode*/ false); + } + } +} + +// Assumed that saved tensor lists are never inplace outputs +inline std::vector<SavedVariable> make_saved_variable_list( + at::ITensorListRef tensors, + const bool is_output = false) { + return fmap(tensors, [&is_output](const at::Tensor& tensor) -> SavedVariable { + return SavedVariable{tensor, is_output /* is output */}; + }); +} + +// Assumed that saved tensor lists are never inplace outputs +inline std::vector<SavedVariable> make_saved_variable_list( + const c10::List<c10::optional<at::Tensor>>& tensors, + const bool is_output = false) { + return fmap( + tensors, + [&is_output](const c10::optional<at::Tensor>& tensor) -> SavedVariable { + if (tensor.has_value()) { + return SavedVariable{*tensor, is_output /* is output */}; + } else { + return SavedVariable{at::Tensor(), is_output /* is output */}; + } + }); +} + +inline std::vector<std::vector<int64_t>> to_args_sizes( + at::ITensorListRef tensors) { + std::vector<std::vector<int64_t>> args_sizes(tensors.size()); + size_t i = 0; + for (const auto& t : tensors) { + args_sizes[i++] = t.sizes().vec(); + } + return args_sizes; +} + +inline std::vector<std::vector<c10::SymInt>> to_args_sizes_symint( + at::ITensorListRef tensors) { + std::vector<std::vector<c10::SymInt>> args_sizes(tensors.size()); + size_t i = 0; + for (const auto& t : tensors) { + args_sizes[i++] = t.sym_sizes().vec(); + } + return args_sizes; +} + +inline std::vector<at::ScalarType> to_args_scalartypes( + at::ITensorListRef tensors) { + std::vector<at::ScalarType> args_scalartypes(tensors.size()); + size_t i = 0; + for (const auto& t : tensors) { + args_scalartypes[i++] = t.scalar_type(); + } + return args_scalartypes; +} + +namespace impl { + +namespace { + +// If run_jit_decomposition were not a member function, we would be able +// to pass this as a template parameter to c10::BoxedKernel::makeFromFunction. +// However, member functions cannot be passed this way - instead we wrap our +// call in this functor so it can be passed to c10::BoxedKernel::makeFromFunctor. +class WrapperFunctor final : public c10::OperatorKernel { + public: + WrapperFunctor(JitDecompInterface* impl) : impl_(impl){}; + + void operator()( + const c10::OperatorHandle& op, + c10::DispatchKeySet ks, + torch::jit::Stack* stack) { + impl_->run_jit_decomposition(op, stack); + } + JitDecompInterface* impl_; +}; + +} // namespace + +template <class Return, class... Args> +Return run_jit_decomposition_with_args_for_jvp( + c10::string_view name, + const c10::OperatorHandle& opHandle, + c10::DispatchKeySet dispatchKeySet, + Args&&...
args) { + // see NOTE: [Jit Decomposition Interface] + JitDecompInterface* impl = getJitDecompImpl(); + + TORCH_CHECK_NOT_IMPLEMENTED( + impl && impl->has_jit_decomposition(opHandle.schema()), + "Trying to use forward AD with ", + name, + " that does not support it because it has not been implemented yet.\nPlease file an issue " + "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml " + "so that we can prioritize its implementation.\n" + "Note that forward AD support for some operators requires PyTorch to be built with " + "TorchScript and for JIT to be enabled. " + "If the environment var PYTORCH_JIT=0 is set or if the library is not built with TorchScript, " + "some operators may not be usable with forward AD."); + + return c10::KernelFunction::makeFromBoxedKernel( + c10::BoxedKernel::makeFromFunctor( + std::make_unique<WrapperFunctor>(impl))) + .call<Return, Args...>( + opHandle, dispatchKeySet, std::forward<Args>(args)...); +} + +} // namespace impl + +} // namespace autograd +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h new file mode 100644 index 0000000000000000000000000000000000000000..e1e4e5c0380f784741554df4f28d7b5931333721 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace torch::autograd { + +// Default DispatchKey::Autograd fallback for built-in operators. +// Can be registered for custom operators. +TORCH_API torch::CppFunction autogradNotImplementedFallback(); + +// Default DispatchKey::AdInplaceOrView fallback for built-in operators. +// Can be registered for custom operators. +TORCH_API torch::CppFunction autogradNotImplementedInplaceOrViewFallback(); + +// Default DispatchKey::Autograd fallback for all other operators (i.e.
custom +// operators) +TORCH_API torch::CppFunction basicAutogradNotImplementedFallback(); + +enum class AutogradFallbackMode { + Nothing, // Fallback is a redispatch + Warn, // Fallback raises a warning if backward is called + Error, // Fallback raises an error if backward is called +}; + +// Change the behavior of "basicAutogradNotImplementedFallback" +// In Python this is: +// - torch._C._set_autograd_fallback_mode(str) -> None +// - torch._C._get_autograd_fallback_mode() -> str +TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode); +TORCH_API AutogradFallbackMode getAutogradFallbackMode(); + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h new file mode 100644 index 0000000000000000000000000000000000000000..ddfa2bb973159841b07259c4880d351bc10fa076 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h @@ -0,0 +1,425 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::autograd { + +using optional_variable_list = std::vector>; +using _jvp_fn_t = std::function; +using _view_as_self_fn_t = std::function; + +TORCH_API std::vector> _wrap_outputs( + const variable_list& input_vars, + const std::unordered_set& non_differentiable, + const std::unordered_set& dirty_inputs, + const at::ArrayRef> raw_outputs, + const std::shared_ptr& cdata, + const _jvp_fn_t& jvp_user_function, + const std::unordered_set& to_save_if_setup_context, + const _view_as_self_fn_t& view_as_self_fn); + +TORCH_API void check_variable_result( + const at::TensorBase& original, + const at::TensorBase& result, + const std::string& hook_name); + +// Get the return type of the forward function of the custom Function class X +template +using forward_t = decltype(X::forward(nullptr, std::declval()...)); + +/// To use custom autograd operations, implement a Function subclass with +/// static forward and backward functions: +/// +/// `forward` can take as many arguments as you want and should return either a +/// variable list or a Variable. Use of any direct Variable arguments will be +/// registered in the graph but no vectors/sets or any other data structures +/// will be traversed. You can use c10::optional as one of the arguments +/// and it will be registered as a variable in the graph if the argument has a +/// value. It should take a pointer to `torch::autograd::AutogradContext` as the +/// first argument. Variables can be saved in the `ctx` using +/// `ctx->save_for_backward` +/// (see `torch::autograd::AutogradContext::save_for_backward`) and other data +/// can be saved in the `ctx->saved_data` map +/// (see `torch::autograd::AutogradContext::saved_data`) +/// in the form of `` pairs. +/// +/// `backward` should take a pointer to `torch::autograd::AutogradContext` +/// and a variable list containing as many Variables as there were outputs from +/// `forward` as arguments. It should return as many Variables as there were +/// inputs with each of them containing the gradient w.r.t. its corresponding +/// input. Variables saved in `forward` can be accessed with +/// `ctx->get_saved_variables` (see +/// `torch::autograd::AutogradContext::get_saved_variables`) and other saved +/// data can be accessed from `ctx->saved_data`. 
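+/// Whether a particular gradient actually has to be computed can be queried
+/// from inside `backward` via `ctx->needs_input_grad` (which exposes the
+/// Node's `task_should_compute_output`, see `AutogradContext` below), e.g.:
+/// ```
+/// if (ctx->needs_input_grad(0)) {
+///   // only compute this gradient when the engine actually needs it
+/// }
+/// ```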
+/// +/// For example: +/// ``` +/// class MyFunction : public Function { +/// public: +/// static variable_list forward(AutogradContext *ctx, int n, Variable var) { +/// // Save data for backward in context +/// ctx->saved_data["n"] = n; +/// var.mul_(2); +/// // Mark var as modified by inplace operation +/// ctx->mark_dirty({var}); +/// return {var}; +/// } +/// +/// static variable_list backward(AutogradContext *ctx, variable_list +/// grad_output) { +/// // Use data saved in forward +/// auto n = ctx->saved_data["n"].toInt(); +/// return {grad_output[0]*n}; +/// } +/// }; +/// ``` +/// +/// To use `MyFunction`: +/// ``` +/// Variable x; +/// auto y = MyFunction::apply(6, x); +/// // Example backward call +/// y[0].sum().backward(); +/// ``` +template +struct TORCH_API Function { + // We need to use a different template parameter than T here because T will + // inherit from Function, and when Function is instantiated, T::forward + // is not declared yet. + // The enable_if check is to ensure that the user doesn't explicitly provide + // the parameter X. + template + static auto apply(Args&&... args) + -> std::enable_if_t, forward_t>; +}; + +/// Context to save information during `forward` that can be accessed in +/// `backward` in custom autograd operations (see `torch::autograd::Function` +/// for details). +struct TORCH_API AutogradContext { + AutogradContext() = default; + AutogradContext(const AutogradContext& other) = delete; + AutogradContext& operator=(const AutogradContext& other) = delete; + + /// Can be used to save non-variable data for `backward`. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + ska::flat_hash_map saved_data; + + /// Saves the list of variables for a future call to `backward`. This + /// should be called at most once from inside of `forward`. + void save_for_backward(variable_list to_save); + /// Marks variables in the list as modified in an in-place operation. This + /// should be called at most once from inside of `forward` and all arguments + /// should be inputs. + void mark_dirty(const variable_list& inputs); + /// Marks outputs in the list as not requiring gradients. This should be + /// called at most once from inside of `forward` and all arguments should be + /// outputs. + void mark_non_differentiable(const variable_list& outputs); + // Sets whether undefined output grad tensors should be expanded to tensors + // full of zeros before calling backward function. Default value is true. + void set_materialize_grads(bool value); + + /// Get the list of variables that were saved in `forward` using + /// `save_for_backward()`. Before returning them to the user, a check is made + /// to ensure that they were not modified by any in-place operations. + variable_list get_saved_variables() const; + const std::unordered_set& get_and_bump_dirty() const; + const std::unordered_set& get_non_differentiable() const; + + /// Expose the Node's `task_should_compute_output` method to the cpp + /// custom autograd Function as `needs_input_grad`. + bool needs_input_grad(size_t output_edge_index) const; + bool needs_input_grad(std::initializer_list idxs) const; + + private: + std::unordered_set non_differentiable_; + std::unordered_set dirty_inputs_; + std::vector saved_variables_; + variable_list to_save_; + bool materialize_grads_{true}; + + // The CppNode in the autograd graph that owns this AutogradContext. We need a + // weak_ptr to avoid a refcycle. 
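+  // (Concretely, CppNode<T> stores this AutogradContext by value as its
+  // `ctx_` member, see the CppNode definition below.)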
Since grad_fn_ owns this AutogradContext, it + // will always be alive when we want to use it. + std::weak_ptr grad_fn_; + bool has_freed_buffers_{false}; + + void save_variables(); + + template + friend struct CppNode; +}; + +// CppNode is the Node in the autograd graph that represents the user defined +// backward function for Function. Calls to CppNode::apply are forward to +// T::backward(). +template +struct CppNode : public Node { + variable_list apply(variable_list&& inputs) override; + AutogradContext ctx_; + std::vector is_variable_input_; + std::vector input_info_; + std::vector output_info_; + + void release_variables() override; + + void set_ctx_grad_fn(const std::shared_ptr& node); + void save_variables_to_ctx(); +}; + +struct ExtractVariables : IterArgs { + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + std::vector& is_var_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + variable_list& list_; + ExtractVariables(std::vector& is_var, variable_list& list) + : is_var_(is_var), list_(list) {} + void operator()(const c10::optional& x) { + // NOLINTNEXTLINE(bugprone-branch-clone) + if (x.has_value() && x.value().defined()) { + is_var_.push_back(true); + list_.emplace_back(x.value()); + } else { + is_var_.push_back(false); + } + } + void operator()(const at::Tensor& x) { + is_var_.push_back(true); + list_.emplace_back(x); + } + void operator()(const at::TensorList& list) { + for (const at::Tensor& x : list) { + is_var_.push_back(true); + list_.emplace_back(x); + } + } + template + void operator()(const T& x) { + is_var_.push_back(false); + } +}; + +template +inline void extract_vars( + std::vector& is_var, + variable_list& list, + Args&&... args) { + ExtractVariables(is_var, list).apply(std::forward(args)...); +} + +template +std::enable_if_t, T> to_output_type( + std::vector>& output_list) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + variable_list result; + std::transform( + output_list.begin(), + output_list.end(), + std::back_inserter(result), + [](const c10::optional& var) { return *var; }); + return result; +} + +template +std::enable_if_t, T> to_output_type( + std::vector>& output_list) { + return *output_list[0]; +} + +inline std::vector> to_optional(Variable& output) { + return std::vector>{output}; +} + +inline std::vector> to_optional(variable_list& output) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector> result; + std::transform( + output.begin(), + output.end(), + std::back_inserter(result), + [](const Variable& var) { return var; }); + return result; +} + +template +template +auto Function::apply(Args&&... args) + -> std::enable_if_t, forward_t> { + const auto& functorch_tls = at::functorch::functorchTLSAccessor(); + if (functorch_tls) { + // Function support for functorch is handled in Python. + // Here we are dealing with a (C++) Function, which is not supported. + // Let's raise an error instead of being silently incorrect. 
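+    // After the functorch check above, apply() proceeds to:
+    //   1. build a CppNode<T> and record which arguments are Variables
+    //      (extract_vars below),
+    //   2. run T::forward with grad mode disabled (AutoGradMode(false)),
+    //   3. wrap the outputs via _wrap_outputs so they are attached to the
+    //      newly created node when gradients are required.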
+ functorch_tls->checkSupportsCppAutogradFunction(); + } + + std::shared_ptr> node(new CppNode(), deleteNode); + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + variable_list input_vars; + + const size_t num_inputs = sizeof...(Args); + input_vars.reserve(num_inputs); + node->is_variable_input_.reserve(num_inputs); + // TODO Add tracing here + extract_vars(node->is_variable_input_, input_vars, args...); + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool is_executable = + GradMode::is_enabled() && any_variable_requires_grad(input_vars); + auto next_edges = + (is_executable ? collect_next_edges(input_vars) : edge_list()); + node->set_ctx_grad_fn(node); + node->set_next_edges(std::move(next_edges)); + node->clear_input_metadata(); + + node->input_info_.reserve(input_vars.size()); + for (auto& var : input_vars) { + node->input_info_.emplace_back(var); + } + + using forward_return_t = forward_t; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + forward_return_t outputs; + { + AutoGradMode grad_mode(false); + outputs = T::forward(&node->ctx_, std::forward(args)...); + } + + _jvp_fn_t jvp_fn = [](const variable_list& inputs, + const variable_list& gI) -> variable_list { + TORCH_CHECK( + false, + "jvp is not implemented for the c++ API of custom Function yet.", + "Please open a feature request on GitHub if you need this."); + }; + + auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor { + return x.view_as(x); + }; + + auto wrapped_outputs = _wrap_outputs( + input_vars, + node->ctx_.get_non_differentiable(), + node->ctx_.get_and_bump_dirty(), + to_optional(outputs), + is_executable ? node : nullptr, + jvp_fn, + {}, + view_as_self_fn); + + node->output_info_.reserve(wrapped_outputs.size()); + for (auto& output : wrapped_outputs) { + if (is_executable && output.has_value()) { + node->output_info_.emplace_back(output.value()); + } else if (is_executable) { + node->output_info_.emplace_back(); + } + } + + if (is_executable) { + node->save_variables_to_ctx(); + } + + // wrapped_outputs will be a variable_list so, convert it to the correct + // return type. Only Variable and variable_list are accepted as return types. + return to_output_type(wrapped_outputs); +} + +// The logic here is the same as PyNode::apply, so changes to it should be done +// in both the places +template +// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved) +variable_list CppNode::apply(variable_list&& inputs) { + at::OptionalDeviceGuard _device_guard; + + auto num_inputs = inputs.size(); + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + variable_list backward_inputs; + backward_inputs.reserve(num_inputs); + for (const auto i : c10::irange(num_inputs)) { + if (inputs[i].defined() || !ctx_.materialize_grads_) { + backward_inputs.emplace_back(std::move(inputs[i])); + } else { + backward_inputs.emplace_back(output_info_[i].zeros(_device_guard)); + } + } + + // Acquire lock to here protect thread safety on custom C++ Autograd Node + // This is needed for the custom Autograd Node since we don't know if the + // user defined Node will write to the shared data during backward. + // see Note [Thread Safety on Autograd Node] + std::lock_guard lock(mutex_); + + auto outputs = T::backward(&ctx_, backward_inputs); + + const auto num_forward_inputs = + static_cast(is_variable_input_.size()); + auto num_outputs = static_cast(outputs.size()); + // Returning too many results is ok, but only as long as they're all + // undefined. Truncate the result vector in that case. 
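+  // For example, if T::forward was invoked as forward(ctx, int n, Variable x),
+  // is_variable_input_ is {false, true}: backward must return two entries,
+  // with an undefined Variable in the slot corresponding to `n`. A trailing
+  // extra entry is tolerated here only if it is undefined; any other size
+  // mismatch is reported as an error below.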
+ if (num_outputs > num_forward_inputs) { + bool all_undef = true; + for (const auto i : c10::irange(num_forward_inputs, num_outputs)) { + all_undef &= (!outputs[i].defined()); + } + if (all_undef) { + outputs.resize(num_forward_inputs); + num_outputs = num_forward_inputs; + } + } + + if (num_outputs != num_forward_inputs) { + std::string msg("function "); + msg += name() + " returned an incorrect number of gradients (expected "; + msg += c10::to_string(num_forward_inputs) + ", got "; + msg += c10::to_string(num_outputs) + ")"; + throw std::runtime_error(msg); + } + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + variable_list results; + results.reserve(num_outputs); + for (const auto i : c10::irange(num_outputs)) { + if (!is_variable_input_[i]) { + if (outputs[i].defined()) { + std::string msg("function "); + msg += name() + + " returned a gradient different that is defined at position "; + msg += c10::to_string(i + 1) + + ", but the corresponding forward input was not a Variable"; + throw std::runtime_error(msg); + } + continue; + } + results.emplace_back(outputs[i]); + } + return results; +} + +template +void CppNode::release_variables() { + // lock to ensure thread safety, see [Thread Safety on Autograd Node] + std::lock_guard lock(mutex_); + ctx_.saved_variables_.clear(); + ctx_.has_freed_buffers_ = true; +} + +template +void CppNode::save_variables_to_ctx() { + ctx_.save_variables(); +} + +template +void CppNode::set_ctx_grad_fn(const std::shared_ptr& node) { + ctx_.grad_fn_ = node; +} + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h new file mode 100644 index 0000000000000000000000000000000000000000..f8108d529535e1443a66d182d7d3cb2358f55009 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch::autograd { + +struct Node; + +/// Represents a particular input of a function. +struct Edge { + Edge() noexcept : function(nullptr), input_nr(0) {} + + Edge(std::shared_ptr function_, uint32_t input_nr_) noexcept + : function(std::move(function_)), input_nr(input_nr_) {} + + /// Convenience method to test if an edge is valid. + bool is_valid() const noexcept { + return function != nullptr; + } + + // Required for use in associative containers. + bool operator==(const Edge& other) const noexcept { + return this->function == other.function && this->input_nr == other.input_nr; + } + + bool operator!=(const Edge& other) const noexcept { + return !(*this == other); + } + + /// The function this `Edge` points to. + std::shared_ptr function; + + /// The identifier of a particular input to the function. + uint32_t input_nr; +}; +} // namespace torch::autograd + +// The idiomatic way of enabling use of a custom type as the key of hash +// containers in C++11. This method removes the requirement of having to pass +// a custom hasher to std::unordered_{map, set}. +// See http://en.cppreference.com/w/cpp/utility/hash for more information. +namespace std { +template <> +struct hash { + // These type aliases are required by the standard. 
+ using argument_type = torch::autograd::Edge; + using return_type = size_t; + return_type operator()(const argument_type& edge) const noexcept { + return c10::get_hash(edge.function, edge.input_nr); + } +}; +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h new file mode 100644 index 0000000000000000000000000000000000000000..becc73396e66dba5c22cc89154650c6c67e3487e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h @@ -0,0 +1,763 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::autograd { + +struct Edge; +struct FunctionPostHook; +struct FunctionPreHook; + +using tensor_list = std::vector; +using variable_list = std::vector; +using edge_list = std::vector; +using saved_variable_list = std::vector; +using IndexRange = std::pair; +using torch::dynamo::autograd::CompiledNodeArgs; +using torch::dynamo::autograd::SwapSavedVariables; + +// Custom deleter to prevent stack overflows. +TORCH_API void deleteNode(Node* function); + +// Guard that sets and restores the evaluating node +class NodeGuard { + public: + explicit NodeGuard(std::shared_ptr node); + ~NodeGuard(); + + private: + std::shared_ptr last_evaluating_node_; +}; + +// Return the Node currently being evaluated (if any) +// This is only set during the backward pass while a Node is being +// executed. +TORCH_API std::shared_ptr get_current_node(); + +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Node +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// A `Node` is an abstract class that represents an operation taking zero +// or more input `Variable`s and producing zero or more output `Variable`s. All +// functions in PyTorch's autograd machinery derive from this class and +// override its `apply` method. Instances of such subclasses will then be +// invokable via the call operator. +// +// Nodes in the Autograd Graph +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// When viewing the autograd system as a graph, `Node`s are the vertices or +// nodes, connected to each other via (directed) `Edge`s, which themselves are +// represented via (`Node`, input_nr) pairs. `Variable`s are the outputs to +// and inputs of `Node`s, and travel between these edges during execution +// of the graph. When two or more `Edge`s (from different sources) point at the +// same input to a `Node`, the values produced along all of these edges are +// implicitly summed prior to being forwarded to the target `Node`. +// +// Hierarchy +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Subclasses usually represent differentiable functions as well as their +// gradient operators. Note, however, that due to the very general definition +// of a `Node` taking *zero* or more inputs and producing *zero* or more +// outputs, uses of `Node`s are flexible and extend beyond purely +// mathematical operations. For example, the `AccumulateGrad` function is a +// *sink*: it takes one input, but produces no outputs, instead accumulating +// the input as a side effect. 
At the other extreme, the `GraphRoot` function +// receives no inputs from other functions, but produces multiple outputs. +// +// Interface +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// The most important method on `Node` is the call operator, which takes in +// a list of variables and produces a list of variables. The precise size of +// these lists can be determined with `num_inputs()` and `num_outputs()`. +// `Node`s are stitched together via their `next_edge` interface, which let +// you manipulate the set of outgoing edges of a `Node`. You can add an +// edge with `add_next_edge()`, retrieve an edge with `next_edge(index)` and +// iterate over them via the `next_edges()` method. Other methods exist for +// integration with the JIT and other parts of PyTorch. Every `Node` has a +// *sequence number* that increases monotonically in the order of `Node` +// construction. It can be retrieved via the `sequence_nr()` method. Note that +// this sequence number is *thread local*. This means that when `Node`s +// `A`, `B` and `C` are created consecutively in the same thread, their +// sequence numbers will be ordered `A` < `B` < `C`. If, however, `A` and `B` +// are created in one thread and `C` is created in a new thread, there are *no +// guarantees* w.r.t. the ordering of `C` relative to `A` or `B`. +// See NOTE [ Sequence Number] for more details on the usages of sequence +// number. +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +struct TORCH_API Node : std::enable_shared_from_this { + public: + /// Construct a new `Node` with the given `next_edges` + explicit Node(uint64_t sequence_nr, edge_list&& next_edges = edge_list()) + : sequence_nr_(sequence_nr), next_edges_(std::move(next_edges)) { + for (const Edge& edge : next_edges_) { + update_topological_nr(edge); + } + + if (AnomalyMode::is_enabled()) { + metadata()->store_stack(); + + // If anomaly mode is enabled and graph is constructed, then assign the + // currently evaluating node as the parent of this node. + // A parent is a Node where this Node is created. + // We are tracking the parents to track multiple backward operations. + assign_parent(); + } + + // Store the thread_id of the forward operator. + // See NOTE [ Sequence Numbers ] + thread_id_ = at::RecordFunction::currentThreadId(); + } + + explicit Node(edge_list&& next_edges = edge_list()) + : Node( + /*sequence_nr=*/at::sequence_number::get_and_increment(), + std::move(next_edges)) {} + + /// Nodes are neither copyable nor moveable. + Node(const Node& other) = delete; + Node(Node&& other) = delete; + Node& operator=(const Node& other) = delete; + Node& operator=(Node&& other) = delete; + virtual ~Node() = default; + + std::shared_ptr getptr() { + return shared_from_this(); + } + /// Evaluates the function on the given inputs and returns the result of the + /// function call. + variable_list operator()(variable_list&& inputs) { + // In the first iteration of named tensors, autograd ignores names and + // operates on unnamed tensors. In the long term, autograd should + // probably operate with names. + at::NoNamesGuard no_names_guard; + +#ifdef USE_ROCM + // Keep track of backward pass for rocblas. 
+ at::ROCmBackwardPassGuard in_backward; +#endif + + auto step_callbacks = + at::getStepCallbacksUnlessEmpty(at::RecordScope::BACKWARD_FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value())) { + at::RecordFunction guard(std::move(*step_callbacks)); + // Using sequence number and thread id to correlate with + // the forward pass function + guard.setForwardThreadId(thread_id_); + if (guard.needsInputs()) { + std::vector inputs_vec(inputs.begin(), inputs.end()); + guard.before( + name(), + c10::ArrayRef( + inputs_vec.data(), inputs_vec.size()), + static_cast(sequence_nr())); + } else { + guard.before(name(), static_cast(sequence_nr())); + } + return apply(std::move(inputs)); + } else { + return apply(std::move(inputs)); + } + } + + // Graph Connectivity API + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + // Inputs. NOTE: inputs of the grad_fn correspond to Tensor outputs of the + // forward function. + + // Marker for expected undefined input + struct undefined_input {}; + + /// Adds the type and shape metadata for a new input. Returns the index of + /// of the new input. + uint32_t add_input_metadata( + const at::TensorOptions& options, + c10::SymIntArrayRef shape, + bool is_tensor_subclass, + bool is_nested) noexcept { + uint32_t input_nr = input_metadata_.size(); + auto meta_shape = MetadataShape{std::in_place_type, shape}; + input_metadata_.emplace_back( + options, meta_shape, is_tensor_subclass, is_nested); + return input_nr; + } + + uint32_t add_input_metadata(const at::Tensor& t) noexcept { + uint32_t input_nr = input_metadata_.size(); + input_metadata_.emplace_back(t); + return input_nr; + } + + /// Adds a placeholder for an input that will not be used. + uint32_t add_input_metadata(undefined_input u) noexcept { + uint32_t input_nr = input_metadata_.size(); + input_metadata_.emplace_back(); + return input_nr; + } + + uint32_t num_inputs() const noexcept { + return input_metadata_.size(); + } + + const InputMetadata& input_metadata(size_t index) const { + return input_metadata_[index]; + } + + // Danger: not thread safe, caller must protect with lock + InputMetadata& mutable_input_metadata(size_t index) { + return input_metadata_[index]; + } + + /** + * Note: Function Streams + * A function's stream (for a given device type) is the stream of the first + * element of its input buffer on a device of that type. + * + * If all elements are on the same device they MUST share a stream. If + * elements are on different devices (across multiple GPUs, for example) + * they may have different streams. + */ + c10::optional stream() { + auto opt_device_type = at::getAccelerator(); + if (!opt_device_type.has_value()) { + return c10::nullopt; + } + for (const auto& metadata : input_metadata_) { + if (metadata.device().type() == opt_device_type.value()) + return metadata.stream(); + } + + return c10::nullopt; + } + + void clear_input_metadata() { + input_metadata_.clear(); + } + + // Outputs ("Next Edges") + + void update_topological_nr(const Edge& edge) { + TORCH_INTERNAL_ASSERT( + !has_parent_, + "Cannot update a node's topological_nr after it already has a parent." 
+ " If we allow this, we can no longer guarantee that a parent's" + " topo_nr is always greater than those of all its children") + Node* node = edge.function.get(); + if (node) { + auto topo_nr = node->topological_nr(); + if (topological_nr_ <= topo_nr) { + topological_nr_ = topo_nr + 1; + } + } + } + + void set_next_edge(size_t index, Edge edge) { + update_topological_nr(edge); + next_edges_[index] = std::move(edge); + } + + void add_next_edge(Edge edge) { + update_topological_nr(edge); + next_edges_.emplace_back(std::move(edge)); + } + + void set_next_edges(edge_list&& next_edges) { + next_edges_ = std::move(next_edges); + for (const auto& next_edge : next_edges_) { + update_topological_nr(next_edge); + } + } + + const Edge& next_edge(size_t index) const noexcept { + return next_edges_[index]; + } + + const edge_list& next_edges() const noexcept { + return next_edges_; + } + + edge_list& next_edges() noexcept { + return next_edges_; + } + + uint32_t num_outputs() const noexcept { + return next_edges_.size(); + } + + // Miscellaneous Methods + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + /// NOTE [ Sequence Number] + /// + /// The sequence_nr has two main usages in autograd: + /// + /// 1) Helps determine the node's execution priority in the engine. + /// All else being equal, nodes with higher priority numbers are executed + /// first. Thus, nodes corresponding to ops executed later are the first to + /// be executed in the backward pass. One caveat is that we prioritize + /// AccumulateGrad nodes by explicitly setting its sequence_nr to be + /// UINT64_MAX. + /// 2) The sequence number of this `Node` is paired with with thread_id it was + /// created in + /// as a unique identifier by the profiler to annotate recorded events. + /// The purpose of this is to help users (and possibly programs) + /// interpreting the profiler's output to correlate backward nodes with its + /// forward ops. We need both sequence_nr and thread_id to identify a node + /// because sequence_nr is thread_local, i.e., starts counting up from zero + /// in a new thread + uint64_t sequence_nr() const noexcept { + return sequence_nr_; + } + + void set_sequence_nr(uint64_t sequence_nr) { + sequence_nr_ = sequence_nr; + } + + // NOTE [ Topological Number ] + // + // topological_nr is used to prune branches in the DAG during autograd + // discovery as maintaining topological_nr helps us check in O(1) if there + // does NOT exist a directed path between two nodes. + // + // The topological order number of this `Node` representing the length of the + // longest possible path from this Node to any leaf node. If you are leaf + // node, aka AccumulateGrad, this will be zero. This value has the property + // that For every pair of nodes X, Y in G, existence of a directed path from X + // to Y implies topo_nr(X) > topo_nr(Y). The converse is not true, however, so + // we cannot prove existence of a path from X to Y, only non-existence. + // + // One assumption we make when using topo_nr is that once a node + // has been used, i.e., has a parent node, its own topo_nr does not change + // we have added some checks with the `has_parent_` field to enforce this. + // + // What NOT to do: + // + // 1) 2 -> 1 -> 0 In this diagram we label nodes with their + // topo_nr. + // 2 -> 1 -> 0 We have two simple graphs that can each + // arise from + // `t.exp().exp()`, for example. + // 2) 2 -> 1 -> 0 + // / + // 2 -> 1 -> 0 We add 2 as a next edge to 1 even though 1 + // already + // has a parent. 
+ // 3) 2 -> 1 -> 0 + // / + // 2 -> 3 -> 0 2 < 3, yet there exists a path from 2 to 3! + // + uint64_t topological_nr() const noexcept { + has_parent_ = true; + return topological_nr_; + } + + // assigning a node as a parent to this node + void assign_parent(); + + /// Id of the thread that created Node + uint64_t thread_id() const noexcept { + return thread_id_; + } + + /// Returns the name of the dynamic type of the function, for debugging. + virtual std::string name() const; + + /// The difference between functions `should_compute_output` and + /// `task_should_compute_output`: + /// - `should_compute_output` should only be used during graph construction + /// and takes into account only requires_grad information + /// - `task_should_compute_output` should only be called during the backward + /// pass (unless called directly through grad_fn) and takes into account the + /// current graph task. Specifically, the autograd engine trims unnecessary + /// edges when `inputs` are specified, and during backward untrimmed nodes + /// left on the graph can/should check `task_should_compute_output` to see if + /// any outgoing edges have been trimmed by the engine. If that is the case, + /// gradient computation wrt those edges can be omitted. + /// + /// Returns true if the particular output edge is active, and that particular + /// output of this function should be computed. + bool should_compute_output(size_t output_edge_index) const { + TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range"); + return next_edges_[output_edge_index].is_valid(); + } + + /// Returns true if any of the output edges in any of the ranges are active. + bool should_compute_output(std::initializer_list idxs) const { + return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) { + for (const auto i : c10::irange(range.first, range.second)) { + if (should_compute_output(i)) + return true; + } + return false; + }); + } + + /// Same as the above `should_compute_output` function but will also + /// check whether this edge is needed within the current graph task. + bool task_should_compute_output(size_t output_edge_index) const { + TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range"); + const auto& next = next_edges_[output_edge_index]; + if (next.is_valid()) { + const auto exec_info = get_current_graph_task_exec_info(); + if (exec_info && !exec_info->empty()) { + auto it = exec_info->find(next.function.get()); + if (it == exec_info->end() || !it->second.should_execute()) { + return false; // this edge is not needed for the current graph_task + } + } + return true; + } + return false; + } + + /// Returns true if any of the output edges in any of the ranges are active + /// and should be computed in the current graph task. + bool task_should_compute_output( + std::initializer_list idxs) const { + return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) { + for (const auto i : c10::irange(range.first, range.second)) { + if (task_should_compute_output(i)) + return true; + } + return false; + }); + } + + /// Returns the `PyObject` stored for this `Node` (for Python + /// interaction). + PyObject* pyobj() const noexcept { + return pyobj_; + } + + /// Sets the `PyObject` stored for this `Node` (for Python interaction). + void set_pyobj(PyObject* pyobj) noexcept { + pyobj_ = pyobj; + } + + /// Returns the anomaly metadata stored for this `Node`. + /// If none exist, creates a new empty one. 
+ AnomalyMetadata* metadata() noexcept; + + // Hook API + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + uintptr_t add_post_hook(std::unique_ptr&& post_hook) { + post_hooks_.emplace_back(std::move(post_hook)); + // Use the raw pointer as the unique key to identify this hook. This key + // can then be used in del_post_hook(key) to remove this hook. + return reinterpret_cast(post_hooks_.back().get()); + } + + const std::vector>& post_hooks() + const noexcept { + return post_hooks_; + } + + // delete a post hook matching the key + bool del_post_hook(const uintptr_t& key) { + for (auto it = post_hooks_.begin(); it != post_hooks_.end(); ++it) { + if (key == reinterpret_cast(it->get())) { + post_hooks_.erase(it); + return true; + } + } + return false; + } + + std::vector>& post_hooks() noexcept { + return post_hooks_; + } + + void add_pre_hook(std::unique_ptr&& pre_hook) { + pre_hooks_.emplace_back(std::move(pre_hook)); + } + + void add_tensor_pre_hook(std::unique_ptr&& pre_hook) { + tensor_pre_hooks_.emplace_back(std::move(pre_hook)); + } + + void add_retains_grad_hook( + std::unique_ptr&& pre_hook, + size_t output_idx) { + retains_grad_hooks_[output_idx] = std::move(pre_hook); + } + + std::unique_ptr pop_retains_grad_hook(size_t output_idx) { + auto ret = std::move(retains_grad_hooks_[output_idx]); + retains_grad_hooks_.erase(output_idx); + return ret; + } + + const std::vector>& pre_hooks() + const noexcept { + return pre_hooks_; + } + + std::vector>& pre_hooks() noexcept { + return pre_hooks_; + } + + virtual std::vector>& + tensor_pre_hooks() noexcept { + return tensor_pre_hooks_; + } + + virtual std::unique_ptr& + tensor_post_acc_grad_hooks() noexcept { + static std::unique_ptr empty = nullptr; + return empty; + } + + std::unordered_map>& + retains_grad_hooks() noexcept { + return retains_grad_hooks_; + } + + // Customization Points for Subclasses + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + /// Releases saved variables if the operation won't be reused. + virtual void release_variables() {} + + /// Called before an apply if `release_variables()` is going to be called. + /// Allows larger ops like `InterpreterAutogradFunction` to incrementally + /// release variables as they run. + virtual void will_release_variables() {} + + /// Returns true if this function is traceable. An op is traceable if all + /// operations happening within `apply()` are performed on autograd + /// `Variables` (i.e. apply mostly instantiates and applies other functions). + virtual bool is_traceable() { + return false; + } + + /// A `Node` is said to pass state transparently to backward, if the + /// state consists only of (Saved)Variables and only non-variable objects + /// that parameterize the operation in some way that defines the graph + /// structure AND the backward function is traceable. In particular, + /// parametrization MUST NOT depend on the data of any `Variable`. + /// TODO: it might be possible to handle cases where backward is + /// non-traceable but state passing could be considered transparent. This + /// will probably depend on saved_variable_list being mutable. + /// NOTE: this value matters only if is_traceable() returns false. 
+ virtual bool passes_state_transparently() { + return false; + } + + // see [Note: Compiled Autograd] + // Used by compiled autograd to + // 1) Extract tensors/symint args + // 2) Collect node information for specialization and caching + // Implementations in subclasses should call args.collect() with all node + // attrs. These functions are only called durring backward. + virtual void compiled_args(CompiledNodeArgs& args) { + throw std::runtime_error( + std::string("compiled_args not implemented: ") + name()); + } + + // Used by compiled autograd to call apply() with different saved tensors + // Implementations should call saved.before() on all attrs, then apply(), then + // saved.after() on all attrs in the same order. + virtual variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) { + throw std::runtime_error( + std::string("apply_with_saved not implemented: ") + name()); + } + + protected: + /// Performs the `Node`'s actual operation. + virtual variable_list apply(variable_list&& inputs) = 0; + + /// Calls `apply()`, but instruments it with tracing machinery. + variable_list traced_apply(variable_list inputs); + + // Sequence number used to correlate backward nodes with forward ops in the + // profiler and provide determinism in the engine. + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + uint64_t sequence_nr_; + + // See NOTE [ Topological Number ] + uint64_t topological_nr_ = 0; + + // Tracks whether this node has been added as the next_edge of another node + // via set_next_edge(s), which always calls topological_nr() of all its + // children See NOTE [ Topological Number ] for why we need this. + mutable bool has_parent_ = false; + + // Id of the thread that created the instance + uint64_t thread_id_ = 0; + + // Note [Thread Safety on Autograd Node] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Autograd Engine let the owning thread which calls Engine::execute to drive + // the GraphTask execution, there might be cases that part of the GraphTask is + // shared across different `backward()` or `grad()` calls, i.e. fork new + // threads in the middle of the forward and call `backward()` separately from + // different threads. We need to protect the thread safety on NodeTask to + // prevent data racing on shared variables read/write. + // + // NB: This is only needed for Autograd Nodes that runs on CPU, technically + // "CUDA", "XLA" nodes don't need locking because device threads are always + // single threaded. + // + // Here we add a thread mutex to help protect the Node's thread safety, so + // that different threads cannot race the shared data when executing the same + // NodeTask from multiple CPU threads. It IS the user/developer responsibility + // to take advantage of this mutex to protect the thread safety of their + // autograd Node. The general strategy of thread safety on autograd Node: + // + // 1. User should lock the mutex during Node::release_variables() if the Node + // needs + // to release the variables on the fly, this serve the purpose that when we + // release saved_variables from one thread, no other threads can release + // the saved variables concurrently. call the Node::apply(), + // 2. User should lock the mutex during Node::apply(), this is to ensure Node + // that + // writing to the shared variable are not racing across threads (i.e. + // AccumulateGrad and custom C++ Autograd Node if writing to shared + // variables ) + // 3. 
item 2 and item 3 should work together so that when we release saved + // variables + // from one thread, no other threads can call Node::apply(), this ensures + // the variable references from other threads aren't dangling. + // 4. if the Node don't release any variables and no shared data read/write in + // the Node + // i.e. purely functional, user don't need to lock the mutex + // + // This way we could protect the thread safety on Autograd Node, but we could + // still not protect the thread safety on Node pre/post C++ hooks (python + // hooks are automatically thread safe), we rely on the user to write thread + // safe C++ hooks if they want the hook to be correctly applied in + // multithreading environment. + std::mutex mutex_; + + edge_list next_edges_; + PyObject* pyobj_ = nullptr; // weak reference + std::unique_ptr anomaly_metadata_ = nullptr; + + // NOTE [Hooks ordering] + // We have 3 separate fields for pre hooks registered to the autograd nodes + // because the conditions under which they execute are different, and we + // want more fine-grained control over the order in which different types + // of hooks are executed. + // - pre_hooks are only executed when the node itself is executed + // - tensor_pre_hook is executed as long as the engine traverses over it + // even if that node won't be executed. + // - retains_grad_hook are like tensor_pre_hooks except they are always + // ordered after all other tensor pre hooks + std::vector> pre_hooks_; + std::vector> tensor_pre_hooks_; + std::unordered_map> + retains_grad_hooks_; + std::vector> post_hooks_; + at::SmallVector input_metadata_; +}; + +/// See Node::is_traceable() for definition. +struct TraceableFunction : public Node { + using Node::Node; + bool is_traceable() final { + return true; + } +}; + +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Associated Free Nodes +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +namespace detail { +// Implementation of `collect_next_edges` (see below). +struct MakeNextFunctionList : IterArgs { + edge_list next_edges; + using IterArgs::operator(); + void operator()(const Variable& variable) { + if (variable.defined()) { + next_edges.emplace_back(impl::gradient_edge(variable)); + } else { + next_edges.emplace_back(); + } + } + void operator()(const Variable* variable) { + operator()(*variable); + } + void operator()(const c10::optional& variable) { + if (variable.has_value()) { + operator()(*variable); + } else { + next_edges.emplace_back(); + } + } +}; +} // namespace detail + +/// Create an `Edge` between the given `variable` and the `function`, which is +/// assumed to be the gradient function of this variable (i.e. the function +/// through which this variable is backpropagated during the backward pass). +/// This sets the `grad_fn` property of the `variable`. This function assumes +/// that the `Variable` is a new input to the gradient function and its +/// `input_nr` thus equal to `function->num_inputs()`. Additionally, it +/// increments the `Node`'s number of inputs by one. Approximately +/// equivalent to `variable.set_gradient_edge(function, +/// function->add_input_metadata(variable.dispatch_type(), variable.sizes()))`. +/// If you don't want the `Node`'s `num_inputs` to be incremented, use +/// `set_gradient_edge` directly. +inline void create_gradient_edge( + Variable& variable, + std::shared_ptr function) { + // Copy before move. 
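+  // (`input_nr` has to be obtained via add_input_metadata(variable) before
+  // the shared_ptr `function` is moved into the Edge on the next line.)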
+ const auto input_nr = function->add_input_metadata(variable); + impl::set_gradient_edge(variable, {std::move(function), input_nr}); +} + +/// Return true if any of the variables in the list require a gradient. +inline bool any_variable_requires_grad(const variable_list& variables) { + return std::any_of( + variables.begin(), variables.end(), [](const Variable& variable) { + return variable.defined() && variable.requires_grad(); + }); +} + +/// Return the next edges of all the given variables, or tuples of variables. +template +edge_list collect_next_edges(Variables&&... variables) { + detail::MakeNextFunctionList make; + make.apply(std::forward(variables)...); + return std::move(make.next_edges); +} + +struct TypeAndSize { + TypeAndSize() : options(at::TensorOptions()) {} + /* implicit */ + TypeAndSize(const at::Tensor& t) + : sym_sizes(t.sym_sizes().vec()), options(t.options()) {} + + at::Tensor zeros(); + + std::vector sym_sizes; + at::TensorOptions options; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..6342bf280a5cecc4850734d4a84f0b6dd29b4d65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch::dynamo::autograd { +class CompiledNodeArgs; +class SwapSavedVariables; +} // namespace torch::dynamo::autograd + +// A hook that's called on gradients + +namespace torch::autograd { + +using Variable = at::Tensor; +using variable_list = std::vector; + +struct TORCH_API FunctionPreHook { + virtual ~FunctionPreHook() = default; + virtual variable_list operator()(const variable_list& grads) = 0; + // only implemented for python hooks, registers hook with compiled autograd + virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) { + throw std::runtime_error( + std::string("compiled_args nyi, see [Note: Compiled Autograd] ") + + typeid(*this).name()); + } +}; + +struct TORCH_API FunctionPostHook { + virtual ~FunctionPostHook() = default; + virtual variable_list operator()( + const variable_list& outputs /* grad_inputs */, + const variable_list& inputs /* grad_outputs */) = 0; + // only implemented for python hooks, registers hook with compiled autograd + virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) { + throw std::runtime_error( + std::string("compiled_args nyi, see [Note: Compiled Autograd] ") + + typeid(*this).name()); + } +}; + +struct TORCH_API PostAccumulateGradHook { + virtual ~PostAccumulateGradHook() = default; + virtual void operator()(const Variable& tensor) = 0; + // only implemented for python hooks on nodes, registers hook with compiled + // autograd + virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) { + throw std::runtime_error( + std::string("not yet implemented for compiled autograd: ") + + typeid(*this).name()); + } + + virtual void apply_with_saved( + Variable&, + torch::dynamo::autograd::SwapSavedVariables&) { + throw std::runtime_error( + std::string("not yet implemented for compiled autograd: ") + + typeid(*this).name()); + } +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h 
b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..19e8eca4c0de58a75fcf1ee90898e497965e2e4b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace torch::autograd { + +using GradMode = at::GradMode; +using AutoGradMode = at::AutoGradMode; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h new file mode 100644 index 0000000000000000000000000000000000000000..1f74e72cae7cfc7ea93ceb9ba7f1461724e99991 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h @@ -0,0 +1,113 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace torch::autograd { + +using SymIntSmallVec = c10::SmallVector; +using MetadataShape = std::variant; + +/** + * Records TensorOptions, shape of the tensor, whether or not the Python + * dispatch key is set (tensor subclass), and, where applicable, the stream the + * corresponding operation took place on. + * + * If is_valid() is false, then the corresponding input is not used and may be + * an undefined tensor. + */ +struct TORCH_API InputMetadata { + InputMetadata() = default; + InputMetadata( + const at::TensorOptions& options, + MetadataShape input_shape, + bool is_tensor_subclass, + bool is_nested); + InputMetadata(const at::Tensor& t); + + const at::TensorOptions& options() const { + return options_; + } + + caffe2::TypeMeta dtype() const { + return options_.dtype(); + } + + at::Device device() const { + return options_.device(); + } + + at::Layout layout() const { + return options_.layout(); + } + + c10::Stream stream() const { + return stream_; + } + + bool is_tensor_subclass() const { + return is_tensor_subclass_; + } + + at::Tensor zeros_like() const; + + bool is_same_shape(const at::Tensor& grad) const; + + bool is_expandable_to_shape(const at::Tensor& grad) const; + + at::Tensor reduce_grad(at::Tensor& grad) const; + + at::Tensor maybe_reduce( + const size_t index, + at::Tensor grad, + const std::function& format_error) const; + + std::stringstream incompatible_shape_error_message( + const size_t index, + const at::Tensor& grad) const; + + bool was_default_constructed() const { + return was_default_constructed_; + } + + bool is_cpp_nested_tensor() const; + + bool is_nested_tensor() const { + return is_nested_; + } + + c10::SymIntArrayRef shape_as_dim_vector() const; + + // Danger: not thread safe, caller must protect with lock + SymIntSmallVec& mutable_shape_as_dim_vector(); + + private: + at::Tensor shape_as_tensor() const; + bool is_nestedness_same(const at::Tensor& grad) const; + bool maybe_expandable_to(const at::Tensor& grad) const; + + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const at::TensorOptions options_; + MetadataShape shape_; + c10::Stream stream_ = c10::Stream(c10::Stream::Default::DEFAULT, device()); + bool is_tensor_subclass_ = false; + bool is_nested_ = false; + bool was_default_constructed_ = true; +}; +} // namespace torch::autograd diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h new file mode 100644 index 0000000000000000000000000000000000000000..f76e53c91b02b2150d30acaa4418eeccb78bda5b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::autograd::profiler::python_tracer { + +void init(); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h new file mode 100644 index 0000000000000000000000000000000000000000..bd81a28334b331928140dbc6925811407b116368 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +namespace torch::autograd { + +struct THPCppFunction { + PyObject_HEAD std::shared_ptr cdata; +}; + +template +PyObject* CppFunction_pynew( + PyTypeObject* type, + PyObject* args, + PyObject* kwds) { + THPObjectPtr obj(type->tp_alloc(type, 0)); + if (!obj) + return nullptr; + THPCppFunction* f = (THPCppFunction*)obj.get(); + HANDLE_TH_ERRORS + new (&f->cdata) std::shared_ptr(Ctor()(args)); + END_HANDLE_TH_ERRORS + if (!f->cdata) { + return nullptr; + } + return obj.release(); +} + +#define THP_FUNCTION_DEFAULT_METHODS \ + {(char*)"_register_hook_dict", \ + THPCppFunction_register_hook_dict, \ + METH_O, \ + nullptr}, \ + {(char*)"register_hook", THPCppFunction_register_hook, METH_O, nullptr}, \ + {(char*)"register_prehook", \ + THPCppFunction_register_prehook, \ + METH_O, \ + nullptr}, \ + {(char*)"name", THPCppFunction_name, METH_NOARGS, nullptr}, \ + {(char*)"_sequence_nr", \ + THPCppFunction_sequence_nr, \ + METH_NOARGS, \ + nullptr}, \ + { \ + (char*)"_set_sequence_nr", THPCppFunction_set_sequence_nr, METH_O, nullptr \ + } + +#define THP_FUNCTION_DEFAULT_PROPERTIES \ + {(char*)"next_functions", \ + THPCppFunction_next_functions, \ + nullptr, \ + nullptr, \ + nullptr}, \ + {(char*)"requires_grad", \ + THPCppFunction_requires_grad, \ + nullptr, \ + nullptr, \ + nullptr}, \ + { \ + (char*)"metadata", THPCppFunction_metadata, nullptr, nullptr, nullptr \ + } + +PyObject* THPCppFunction_next_functions(PyObject* self, void* _unused); +PyObject* THPCppFunction_metadata(PyObject* self, void* _unused); +PyObject* THPCppFunction_requires_grad(PyObject* self, void* _unused); +PyObject* THPCppFunction_register_hook_dict(PyObject* self, PyObject* _var); +PyObject* THPCppFunction_register_hook(PyObject* self, PyObject* hook); +PyObject* THPCppFunction_register_prehook(PyObject* self, PyObject* hook); + +PyObject* THPCppFunction_name(PyObject* self, PyObject* noargs); +PyObject* THPCppFunction_sequence_nr(PyObject* self, PyObject* noargs); + +PyTypeObject* _initFunctionPyTypeObject( + PyTypeObject& type, + const char* name, + PyGetSetDef* function_properties, + PyMethodDef* function_methods); + +PyObject* registerFunctionHook(Node& fn, PyObject* hook); + +PyObject* registerFunctionPreHook(Node& fn, PyObject* hook); + +template +PyTypeObject* createForwardFunctionPyTypeObject( + PyTypeObject& type, + const char* name, + PyGetSetDef* function_properties = nullptr, + PyMethodDef* function_methods = nullptr) { + type.tp_new = &CppFunction_pynew; + return 
_initFunctionPyTypeObject( + type, name, function_properties, function_methods); +} + +void registerCppFunction(const std::type_info& type, PyTypeObject* pytype); +PyObject* functionToPyObject(const std::shared_ptr& cdata); + +bool THPCppFunction_Check(PyObject* obj); + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h new file mode 100644 index 0000000000000000000000000000000000000000..3018fcd5a463b204a523ee9052fc89b13a776dbd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include + +bool THPEngine_initModule(PyObject* module); + +namespace torch::autograd::python { + +struct PythonEngine : public Engine { + static Engine& get_python_engine(); + ~PythonEngine() override; + void thread_init( + int device, + const std::shared_ptr& ready_queue, + bool should_increment) override; + void thread_on_exception( + std::shared_ptr graph_task, + const std::shared_ptr& fn, + std::exception& e) override; + variable_list execute( + const edge_list& roots, + const variable_list& inputs, + bool keep_graph, + bool create_graph, + bool accumulate_grad, + const edge_list& outputs = {}) override; + + c10::intrusive_ptr execute_with_graph_task( + const std::shared_ptr& graph_task, + std::shared_ptr graph_root, + InputBuffer&& input_buffer) override; + + std::unique_ptr make_anomaly_metadata() override; + std::unique_ptr get_default_saved_variable_hooks() + override; + + private: + PythonEngine(); +}; + +} // namespace torch::autograd::python diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_fft_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_fft_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..b95d25effcbb4fcb4f477ee27913dabafd79a5fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_fft_functions.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::autograd { + +void initFFTFunctions(PyObject* module); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_function.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_function.h new file mode 100644 index 0000000000000000000000000000000000000000..12eb94a7390461668d380847a6db6c871367798a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_function.h @@ -0,0 +1,160 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +namespace torch::jit { +struct Graph; +} + +namespace torch::autograd { + +// A Function which is implemented by a Python object (i.e., a THPFunction). +// Calls to 'apply' are forwarded to the Python method implementation. 
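+// A PyNode keeps a strong (owning) reference to the THPFunction it wraps (see
+// the `obj` field), while the THPFunction only points back through a weak_ptr
+// (`cdata`), so the pair does not form a strong reference cycle.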
+struct PyNode : public Node { + PyNode(THPObjectPtr obj) : obj(obj.release()) {} + + PyObject* to_py_args( + const variable_list& inputs, + at::OptionalDeviceGuard* device_guard); + variable_list to_variable_list( + const PyObject* r, + const std::vector& is_variable_input); + + variable_list apply(variable_list&& inputs) override; + variable_list compiled_apply( + variable_list&& inputs, + std::optional compiler); + + void release_variables() override; + std::string name() const override; + bool is_traceable() override; + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + bool compiled_autograd_should_lift() const; + + // THPFunction this Function is wrapping. Owning! + PyObject* obj; + + // The AutogradCompilerCall::hooks idx corresponding to this node's backward + std::optional _backward_idx; + + // The AutogradCompilerCall::hooks idx corresponding to this node's + // backward_state + std::optional _backward_state_idx; + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~PyNode() override { + // Can't use THPObjectPtr as a field in this class; destructor won't take + // out GIL! When I forgot to do this by hand + // TestAutograd.test_inplace_view_python called me out about it. + // If python is already dead, leak the wrapped python objects + if (Py_IsInitialized()) { + pybind11::gil_scoped_acquire gil; + Py_DECREF(obj); + } + } +}; + +/** + * Cast an object into a tuple, if it is not a tuple already. Returns true + * if the original object was not a tuple. + */ +inline bool ensure_tuple(THPObjectPtr& obj) { + if (PyTuple_Check(obj.get())) + return false; + + PyObject* tuple = PyTuple_New(1); + if (!tuple) + throw python_error(); + PyTuple_SET_ITEM(tuple, 0, obj.release()); + obj = tuple; + return true; +} + +} // namespace torch::autograd + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THPFunction { + PyObject_HEAD + + PyObject* needs_input_grad; + + // Python tuple of tensors whose variables we should save. Set + // by Python with 'save_for_backward'. If nullptr, no tensors were + // saved. + PyObject* to_save; + // Python tuple of tensors which are not differentiable. Set by + // Python with 'mark_non_differentiable'. If nullptr, no tensors were + // non-differentiable. + PyObject* non_differentiable; + // Python tuple of tensors which had inplace updates in the forward() + // pass. Set by Python with 'mark_dirty'. If nullptr, no tensors were + // modified inplace. + PyObject* dirty_tensors; + + // boolean indicating whether to materialize undefined output grad tensors + // into tensors full of zeros. Set by Python with 'set_materialize_grads'. + // Default is true. + bool materialize_grads; + + // boolean indicating whether to materialize output grad tensors + // corresponding to non-differentiable outputs. Normally, someone would + // already get this behavior by switching off materialize_grads, + // but there are certain use cases where that is not feasible: + // https://github.com/pytorch/pytorch/pull/98659#pullrequestreview-1376822560 + bool materialize_non_diff_grads; + + // This is enabled by compiled autograd as a way to signal to AotAutograd it + // should call the original FX graph rather than compiling. 
+ bool compiled_autograd_tracing; + PyObject* compiled_autograd_backward_state; + std::vector compiled_autograd_symints; + + std::vector output_info; + std::vector input_info; + std::vector saved_variables; + // For each input, true if the input is a THPVariable + std::vector is_variable_input; + char has_freed_buffers; + + PyObject* saved_for_forward; + // The actual PyNode (in the autograd graph) that this data was + // saved for. This field may be NULL (because a user can construct + // a THPFunction directly from Python), but when this field is non-NULL, + // it is guaranteed that cdata.lock()->obj == this + // + // In most ordinary use, this field should always be non-NULL; e.g., + // when we allocate a THPFunction because we are running Node.apply, + // after constructing a THPFunction, we immediately allocate a PyNode + // for it. We can't enforce this directly in the constructor of + // THPFunction though, because there's no way to keep it live long enough + // to save an owning reference to PyNode into the grad_fn of a Variable. + std::weak_ptr cdata; +}; + +bool THPFunction_initModule(PyObject* module); +extern PyTypeObject THPFunctionType; +extern PyObject* THPFunctionClass; +extern PyObject* THPGradientEdgeClass; + +inline bool THPFunction_Check(PyObject* obj) { + return PyObject_IsInstance(obj, (PyObject*)&THPFunctionType); +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_hook.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..a17a97924b2a61fa1a887e7aad197e4c14777302 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_hook.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include + +namespace torch::dynamo::autograd { +class SwapSavedVariables; +} // namespace torch::dynamo::autograd + +namespace torch::autograd { + +struct PyFunctionTensorPreHook : public FunctionPreHook { + PyFunctionTensorPreHook(PyObject* dict, size_t value_idx); + ~PyFunctionTensorPreHook() override; + variable_list operator()(const variable_list& values) override; + void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override; + PyObject* dict; + size_t value_idx; +}; + +struct PyFunctionPreHook : public FunctionPreHook { + PyFunctionPreHook(PyObject* dict); + ~PyFunctionPreHook() override; + variable_list operator()(const variable_list& values) override; + void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override; + PyObject* dict; +}; + +struct PyFunctionPostHook : public FunctionPostHook { + PyFunctionPostHook(PyObject* dict); + ~PyFunctionPostHook() override; + variable_list operator()( + const variable_list& outputs, + const variable_list& inputs) override; + void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override; + PyObject* dict; +}; + +// PyFunctionTensorPostAccGradHooks is a dictionary of PostAccumulateGradHooks, +// and it is understandable if you are confused by why it's a subclass. We are +// simply following the precedent of PyFunctionPreHook and PyFunctionPostHook +// above to easily enroll into existing infrastructure. 
+struct PyFunctionTensorPostAccGradHooks : public PostAccumulateGradHook { + PyFunctionTensorPostAccGradHooks(PyObject* dict); + ~PyFunctionTensorPostAccGradHooks() override; + void operator()(const Variable& tensor) override; + void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override; + void apply_with_saved( + Variable& tensor, + torch::dynamo::autograd::SwapSavedVariables& saved) override; + PyObject* dict; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_linalg_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_linalg_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..685c87bb6d2a8583b7cd2099a2b38e3ba47fe181 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_linalg_functions.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::autograd { + +void initLinalgFunctions(PyObject* module); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_saved_variable_hooks.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_saved_variable_hooks.h new file mode 100644 index 0000000000000000000000000000000000000000..ed7e1a287684b7813592b19060e962511e76ee5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_saved_variable_hooks.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; + +namespace torch::autograd { + +struct PySavedVariableHooks : public SavedVariableHooks { + PySavedVariableHooks(py::function& pack_hook, py::function& unpack_hook); + void call_pack_hook(const at::Tensor& tensor) override; + at::Tensor call_unpack_hook() override; + ~PySavedVariableHooks() override; + + private: + PyObject* pack_hook_; + PyObject* unpack_hook_; + PyObject* data_ = nullptr; +}; + +struct PyDefaultSavedVariableHooks { + static void push_hooks(py::function& pack_hook, py::function& unpack_hook); + static void pop_hooks(); + static std::unique_ptr get_hooks(); +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_special_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_special_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..d036ce4383b562b5a88a74f2c455c63cae7ef1bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_special_functions.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::autograd { + +void initSpecialFunctions(PyObject* module); + +} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_torch_functions.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_torch_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..61442c46341dfd301afd8efe992e2be11f098bde --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_torch_functions.h @@ -0,0 +1,25 @@ +#include + +namespace torch::autograd { + +extern PyObject* THPVariableFunctionsModule; + +// Wrapper converts a raised TypeError into returning NotImplemented +// Used to implement binary arithmetic operators +template +inline PyObject* TypeError_to_NotImplemented_( + PyObject* 
self, + PyObject* args, + PyObject* kwargs) { + PyObject* ret = Func(self, args, kwargs); + if (!ret && PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + Py_INCREF(Py_NotImplemented); + ret = Py_NotImplemented; + } + return ret; +} + +void initTorchFunctions(); + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/record_function_ops.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/record_function_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d37aba7dfff85547942c0b2ed33b18798794b144 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/record_function_ops.h @@ -0,0 +1,27 @@ +#pragma once +#include +#include +#include + +namespace torch::autograd::profiler { + +struct PythonRecordFunction : public torch::CustomClassHolder { + at::RecordFunction record; + + explicit PythonRecordFunction( + at::RecordScope scope = at::RecordScope::FUNCTION) + : record(scope) {} +}; + +// Creates a new profiling scope using RecordFunction and invokes its starting +// callbacks. +TORCH_API c10::intrusive_ptr record_function_enter_new( + const std::string& name, + const c10::optional& args = c10::nullopt); + +// Schedules RecordFunction's end callbacks to be run on completion of a future. +TORCH_API c10::intrusive_ptr _call_end_callbacks_on_fut_new( + const c10::intrusive_ptr& record, + const c10::intrusive_ptr& fut); + +} // namespace torch::autograd::profiler diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable.h new file mode 100644 index 0000000000000000000000000000000000000000..c9a358ede89e6b1dab5e73484f237eade35bda77 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/saved_variable.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +#include + +#include +#include + +namespace torch::autograd { + +using Variable = at::Tensor; +struct Node; + +TORCH_API extern const char* ERR_BACKWARD_TWICE; + +/// A snapshot of a variable at a certain version. A `SavedVariable` stores +/// enough information to reconstruct a variable from a certain point in time. +class TORCH_API SavedVariable { + public: + SavedVariable() = default; + SavedVariable( + const Variable& variable, + bool is_output, + bool is_inplace_on_view = false); + SavedVariable( + const c10::optional& variable, + bool is_output, + bool is_inplace_on_view = false); + SavedVariable(SavedVariable&&) = default; + SavedVariable& operator=(SavedVariable&&) = default; + ~SavedVariable() { + if (fw_grad_) { + // See note [ Using ForwardGrad ] + fw_grad_->clear(); + } + } + + /// Reconstructs the saved variable. Pass `saved_for` as the gradient + /// function if constructing the `SavedVariable` with it would have caused a + /// circular reference. + Variable unpack(std::shared_ptr saved_for = nullptr) const; + + void register_hooks(std::unique_ptr&& hooks); + + void reset_data(); + + bool has_hooks() const { + return (bool)hooks_; + } + + private: + // This field contains either: + // 1. the variable to save + // 2. or its tensor_data. + // If storing the variable itself would create a circular reference, + // we fall into the second case and its metadata is also saved separately. 
+ // In that case, the grad_fn must be passed in to the unpack function when + // reconstructing the Variable (except when we are doing an inplace operation + // on a view, see below). The field saved_original_ below reflects the two + // cases: its value is true in the first case and false in the second case. + // The value data_.defined() can be false in three cases: + // 1. SavedVariable was constructed without a Tensor (the value to save is + // None), in that case was_default_constructed_ will be kept at true + // 2. The saved variable has been released by calling + // SavedVariable::reset_data(), typically during the backward pass + // 3. Hooks have been registered. In that case, hooks_ will be defined + // instead. Note that the value of saved_original_ only reflects what happened + // during the construction of the SavedVariable. If saved_original_ is true, + // we saved the original tensor in data_, but if the user registers hooks, we + // will no longer have it (despite the saved_original_ still being true) + at::Tensor data_; + + // This field is used to store the forward AD gradients associated with + // the saved Tensor. Note that this shared_ptr must never be shared with + // either the saved Tensor or the unpacked Tensor. See note [ Using + // ForwardGrad ] + std::shared_ptr fw_grad_; + + // Weak version of grad_fn_ that prevents leaks in rebase_history() for + // inplace views. + // This variable is used when the user chooses to create a SavedVariable with + // is_inplace_on_view = true. + // In that case, the grad_fn passed in to the unpack function at unwrapping + // time is unused. + std::weak_ptr weak_grad_fn_; + c10::VariableVersion version_counter_; + + uint32_t saved_version_ = 0; + uint32_t output_nr_ = 0; + bool was_default_constructed_ = true; + bool is_inplace_on_view_ = false; + bool saved_original_ = false; + bool is_leaf_ = false; + bool is_output_ = false; + + // Hooks are a pair of functions pack_hook/unpack_hook that provides + // fine-grained control over how the SavedVariable should save its data. + // pack_hook is called upon registration, while unpack_hook is called when + // unpacking. + std::unique_ptr hooks_; + // Fields grad_fn_, grad_accumulator_, and requires_grad_ are only used if + // hooks are defined. They are set before pack_hook is called and used after + // unpack_hook is called. + std::shared_ptr grad_fn_; + // For the usual case where leaf tensors are the input, we expect its + // grad_acc to be kept alive by the graph. The reason SavedVariable holds + // a owning reference is to support the case where a custom autograd Function + // saves an intermediate. 
+ std::shared_ptr grad_accumulator_; + bool requires_grad_ = false; + + void save_metadata(const Variable& data); + static std::unique_ptr get_default_hooks(); + void set_hooks_and_pack_data( + std::unique_ptr&& hooks, + const Variable& data); +}; +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/symbolic.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/symbolic.h new file mode 100644 index 0000000000000000000000000000000000000000..1cec51648432c6ff615bdd486e52a85ef3efb7f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/symbolic.h @@ -0,0 +1,16 @@ +#pragma once + +#include +#include + +namespace torch::autograd { + +struct SymbolicContext { + jit::Block* block; +}; + +struct symbolic_unconvertible : public std::runtime_error { + using std::runtime_error::runtime_error; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/variable_info.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/variable_info.h new file mode 100644 index 0000000000000000000000000000000000000000..63e88deb0d547fb0ab6a7f991bab814028929f7f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/variable_info.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace torch::autograd { + +struct TORCH_API VariableInfo { + explicit VariableInfo(); + explicit VariableInfo(const Variable& var); + + Variable zeros(at::OptionalDeviceGuard& device_guard) const; + + at::Layout layout = at::Layout::Strided; + at::Device device = at::kCPU; + at::ScalarType scalar_type = at::kFloat; + std::vector size; + bool requires_grad; + bool is_empty; +}; + +} // namespace torch::autograd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..9afefe345388f5013c352202c21736cc9f71eaa2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/back_compat.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace torch::onnx { + +// The following constants are defined here to avoid breaking Meta's internal +// usage of ONNX which pre-dates ONNX 1.14 and thus does not support FLOAT8: +// cf. 
https://github.com/pytorch/pytorch/pull/106379#issuecomment-1675189340 +// -abock, 2023-08-25 +// +// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FN +constexpr auto TensorProto_DataType_FLOAT8E4M3FN = + static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(17); +// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FNUZ +constexpr auto TensorProto_DataType_FLOAT8E4M3FNUZ = + static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(18); +// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2 +constexpr auto TensorProto_DataType_FLOAT8E5M2 = + static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(19); +// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2FNUZ +constexpr auto TensorProto_DataType_FLOAT8E5M2FNUZ = + static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(20); + +} // namespace torch::onnx diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h new file mode 100644 index 0000000000000000000000000000000000000000..923aca2097d32d1a6b770b60cd2c9a5b3786b8b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::onnx { + +void initONNXBindings(PyObject* module); + +} // namespace torch::onnx diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..df887844ff66564662ab4a179911180a73132c37 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h @@ -0,0 +1,20 @@ +#pragma once + +namespace torch::onnx { + +enum class OperatorExportTypes { + ONNX, // Strict ONNX export + ONNX_ATEN, // ONNX With ATen op everywhere + ONNX_ATEN_FALLBACK, // ONNX export with ATen fallback + ONNX_FALLTHROUGH, // Export supported ONNX ops. Pass through unsupported ops. +}; + +enum class TrainingMode { + EVAL, // Inference mode + PRESERVE, // Preserve model state (eval/training) + TRAINING, // Training mode +}; + +constexpr char kOnnxNodeNameAttribute[] = "onnx_name"; + +} // namespace torch::onnx diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/custom_class.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..a556ae6a81e572f1db0efed8dd0640afb40cea82 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/custom_class.h @@ -0,0 +1,515 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { + +/// This function is used in conjunction with `class_::def()` to register +/// a constructor for a given C++ class type. For example, +/// `torch::init()` would register a two-argument constructor +/// taking an `int` and a `std::string` as argument. +template +detail::types init() { + return detail::types{}; +} + +template +struct InitLambda { + Func f; +}; + +template +decltype(auto) init(Func&& f) { + using InitTraits = c10::guts::infer_function_traits_t>; + using ParameterTypeList = typename InitTraits::parameter_types; + + InitLambda init{std::forward(f)}; + return init; +} + +/// Entry point for custom C++ class registration. 
To register a C++ class +/// in PyTorch, instantiate `torch::class_` with the desired class as the +/// template parameter. Typically, this instantiation should be done in +/// the initialization of a global variable, so that the class will be +/// made available on dynamic library loading without any additional API +/// calls needed. For example, to register a class named Foo, you might +/// create a global variable like so: +/// +/// static auto register_foo = torch::class_("myclasses", "Foo") +/// .def("myMethod", &Foo::myMethod) +/// .def("lambdaMethod", [](const c10::intrusive_ptr& self) { +/// // Do something with `self` +/// }); +/// +/// In addition to registering the class, this registration also chains +/// `def()` calls to register methods. `myMethod()` is registered with +/// a pointer to the Foo class's `myMethod()` method. `lambdaMethod()` +/// is registered with a C++ lambda expression. +template +class class_ : public ::torch::detail::class_base { + static_assert( + std::is_base_of::value, + "torch::class_ requires T to inherit from CustomClassHolder"); + + public: + /// This constructor actually registers the class type. + /// String argument `namespaceName` is an identifier for the + /// namespace you would like this class to appear in. + /// String argument `className` is the name you would like to + /// see this class exposed as in Python and TorchScript. For example, if + /// you pass `foo` as the namespace name and `Bar` as the className, the + /// class will appear as `torch.classes.foo.Bar` in Python and TorchScript + explicit class_( + const std::string& namespaceName, + const std::string& className, + std::string doc_string = "") + : class_base( + namespaceName, + className, + std::move(doc_string), + typeid(c10::intrusive_ptr), + typeid(c10::tagged_capsule)) {} + + /// def() can be used in conjunction with `torch::init()` to register + /// a constructor for a given C++ class type. For example, passing + /// `torch::init()` would register a two-argument + /// constructor taking an `int` and a `std::string` as argument. + template + class_& def( + torch::detail::types, + std::string doc_string = "", + std::initializer_list default_args = + {}) { // Used in combination with + // torch::init<...>() + auto func = [](c10::tagged_capsule self, Types... args) { + auto classObj = c10::make_intrusive(args...); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(std::move(classObj))); + }; + + defineMethod( + "__init__", + std::move(func), + std::move(doc_string), + default_args); + return *this; + } + + // Used in combination with torch::init([]lambda(){......}) + template + class_& def( + InitLambda> init, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto init_lambda_wrapper = [func = std::move(init.f)]( + c10::tagged_capsule self, + ParameterTypes... arg) { + c10::intrusive_ptr classObj = + at::guts::invoke(func, std::forward(arg)...); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(classObj)); + }; + + defineMethod( + "__init__", + std::move(init_lambda_wrapper), + std::move(doc_string), + default_args); + + return *this; + } + + /// This is the normal method registration API. `name` is the name that + /// the method will be made accessible by in Python and TorchScript. + /// `f` is a callable object that defines the method. 
Typically `f` + /// will either be a pointer to a method on `CurClass`, or a lambda + /// expression that takes a `c10::intrusive_ptr` as the first + /// argument (emulating a `this` argument in a C++ method.) + /// + /// Examples: + /// + /// // Exposes method `foo` on C++ class `Foo` as `call_foo()` in + /// // Python and TorchScript + /// .def("call_foo", &Foo::foo) + /// + /// // Exposes the given lambda expression as method `call_lambda()` + /// // in Python and TorchScript. + /// .def("call_lambda", [](const c10::intrusive_ptr& self) { + /// // do something + /// }) + template + class_& def( + std::string name, + Func f, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto wrapped_f = detail::wrap_func(std::move(f)); + defineMethod( + std::move(name), + std::move(wrapped_f), + std::move(doc_string), + default_args); + return *this; + } + + /// Method registration API for static methods. + template + class_& def_static(std::string name, Func func, std::string doc_string = "") { + auto qualMethodName = qualClassName + "." + name; + auto schema = + c10::inferFunctionSchemaSingleReturn(std::move(name), ""); + + auto wrapped_func = + [func = std::move(func)](jit::Stack& stack) mutable -> void { + using RetType = + typename c10::guts::infer_function_traits_t::return_type; + detail::BoxedProxy()(stack, func); + }; + auto method = std::make_unique( + std::move(qualMethodName), + std::move(schema), + std::move(wrapped_func), + std::move(doc_string)); + + classTypePtr->addStaticMethod(method.get()); + registerCustomClassMethod(std::move(method)); + return *this; + } + + /// Property registration API for properties with both getter and setter + /// functions. + template + class_& def_property( + const std::string& name, + GetterFunc getter_func, + SetterFunc setter_func, + std::string doc_string = "") { + torch::jit::Function* getter{}; + torch::jit::Function* setter{}; + + auto wrapped_getter = + detail::wrap_func(std::move(getter_func)); + getter = defineMethod(name + "_getter", wrapped_getter, doc_string); + + auto wrapped_setter = + detail::wrap_func(std::move(setter_func)); + setter = defineMethod(name + "_setter", wrapped_setter, doc_string); + + classTypePtr->addProperty(name, getter, setter); + return *this; + } + + /// Property registration API for properties with only getter function. + template + class_& def_property( + const std::string& name, + GetterFunc getter_func, + std::string doc_string = "") { + torch::jit::Function* getter{}; + + auto wrapped_getter = + detail::wrap_func(std::move(getter_func)); + getter = defineMethod(name + "_getter", wrapped_getter, doc_string); + + classTypePtr->addProperty(name, getter, nullptr); + return *this; + } + + /// Property registration API for properties with read-write access. + template + class_& def_readwrite(const std::string& name, T CurClass::*field) { + auto getter_func = [field = + field](const c10::intrusive_ptr& self) { + return self.get()->*field; + }; + + auto setter_func = [field = field]( + const c10::intrusive_ptr& self, T value) { + self.get()->*field = value; + }; + + return def_property(name, getter_func, setter_func); + } + + /// Property registration API for properties with read-only access. 
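To make the chained registration API above concrete, here is a minimal sketch of a torchbind class and its registration; MyStack and the my_classes namespace are hypothetical names used only for illustration:

#include <torch/custom_class.h>

#include <string>
#include <utility>
#include <vector>

// Hypothetical custom class; it must inherit from CustomClassHolder.
struct MyStack : torch::CustomClassHolder {
  std::vector<std::string> stack_;
  explicit MyStack(std::vector<std::string> init) : stack_(std::move(init)) {}
  void push(std::string x) { stack_.push_back(std::move(x)); }
  std::string pop() {
    auto v = stack_.back();
    stack_.pop_back();
    return v;
  }
};

// Registered from a global so registration runs at dynamic library load;
// the class appears as torch.classes.my_classes.MyStack in Python/TorchScript.
static auto register_my_stack =
    torch::class_<MyStack>("my_classes", "MyStack")
        .def(torch::init<std::vector<std::string>>())
        .def("push", &MyStack::push)
        .def("pop", &MyStack::pop)
        .def_readwrite("stack", &MyStack::stack_);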
+ template + class_& def_readonly(const std::string& name, T CurClass::*field) { + auto getter_func = + [field = std::move(field)](const c10::intrusive_ptr& self) { + return self.get()->*field; + }; + + return def_property(name, getter_func); + } + + /// This is an unsafe method registration API added for adding custom JIT + /// backend support via custom C++ classes. It is not for general purpose use. + class_& _def_unboxed( + const std::string& name, + std::function func, + c10::FunctionSchema schema, + std::string doc_string = "") { + auto method = std::make_unique( + qualClassName + "." + name, + std::move(schema), + std::move(func), + std::move(doc_string)); + classTypePtr->addMethod(method.get()); + registerCustomClassMethod(std::move(method)); + return *this; + } + + /// def_pickle() is used to define exactly what state gets serialized + /// or deserialized for a given instance of a custom C++ class in + /// Python or TorchScript. This protocol is equivalent to the Pickle + /// concept of `__getstate__` and `__setstate__` from Python + /// (https://docs.python.org/2/library/pickle.html#object.__getstate__) + /// + /// Currently, both the `get_state` and `set_state` callables must be + /// C++ lambda expressions. They should have the following signatures, + /// where `CurClass` is the class you're registering and `T1` is some object + /// that encapsulates the state of the object. + /// + /// __getstate__(intrusive_ptr) -> T1 + /// __setstate__(T2) -> intrusive_ptr + /// + /// `T1` must be an object that is convertable to IValue by the same rules + /// for custom op/method registration. + /// + /// For the common case, T1 == T2. T1 can also be a subtype of T2. An + /// example where it makes sense for T1 and T2 to differ is if __setstate__ + /// handles legacy formats in a backwards compatible way. + /// + /// Example: + /// + /// .def_pickle( + /// // __getstate__ + /// [](const c10::intrusive_ptr>& self) { + /// return self->stack_; + /// }, + /// [](std::vector state) { // __setstate__ + /// return c10::make_intrusive>( + /// std::vector{"i", "was", "deserialized"}); + /// }) + template + class_& def_pickle(GetStateFn&& get_state, SetStateFn&& set_state) { + static_assert( + c10::guts::is_stateless_lambda>::value && + c10::guts::is_stateless_lambda>::value, + "def_pickle() currently only supports lambdas as " + "__getstate__ and __setstate__ arguments."); + def("__getstate__", std::forward(get_state)); + + // __setstate__ needs to be registered with some custom handling: + // We need to wrap the invocation of the user-provided function + // such that we take the return value (i.e. c10::intrusive_ptr) + // and assign it to the `capsule` attribute. 
+ using SetStateTraits = + c10::guts::infer_function_traits_t>; + using SetStateArg = typename c10::guts::typelist::head_t< + typename SetStateTraits::parameter_types>; + auto setstate_wrapper = [set_state = std::forward(set_state)]( + c10::tagged_capsule self, + SetStateArg&& arg) { + c10::intrusive_ptr classObj = + at::guts::invoke(set_state, std::forward(arg)); + auto object = self.ivalue.toObject(); + object->setSlot(0, c10::IValue::make_capsule(classObj)); + }; + defineMethod( + "__setstate__", + detail::wrap_func( + std::move(setstate_wrapper))); + + // type validation + auto getstate_schema = classTypePtr->getMethod("__getstate__").getSchema(); + auto format_getstate_schema = [&getstate_schema]() { + std::stringstream ss; + ss << getstate_schema; + return ss.str(); + }; + TORCH_CHECK( + getstate_schema.arguments().size() == 1, + "__getstate__ should take exactly one argument: self. Got: ", + format_getstate_schema()); + auto first_arg_type = getstate_schema.arguments().at(0).type(); + TORCH_CHECK( + *first_arg_type == *classTypePtr, + "self argument of __getstate__ must be the custom class type. Got ", + first_arg_type->repr_str()); + TORCH_CHECK( + getstate_schema.returns().size() == 1, + "__getstate__ should return exactly one value for serialization. Got: ", + format_getstate_schema()); + + auto ser_type = getstate_schema.returns().at(0).type(); + auto setstate_schema = classTypePtr->getMethod("__setstate__").getSchema(); + auto arg_type = setstate_schema.arguments().at(1).type(); + TORCH_CHECK( + ser_type->isSubtypeOf(*arg_type), + "__getstate__'s return type should be a subtype of " + "input argument of __setstate__. Got ", + ser_type->repr_str(), + " but expected ", + arg_type->repr_str()); + + return *this; + } + + private: + template + torch::jit::Function* defineMethod( + std::string name, + Func func, + std::string doc_string = "", + std::initializer_list default_args = {}) { + auto qualMethodName = qualClassName + "." + name; + auto schema = + c10::inferFunctionSchemaSingleReturn(std::move(name), ""); + + // If default values are provided for function arguments, there must be + // none (no default values) or default values for all function + // arguments, except for self. This is because argument names are not + // extracted by inferFunctionSchemaSingleReturn, and so there must be a + // torch::arg instance in default_args even for arguments that do not + // have an actual default value provided. + TORCH_CHECK( + default_args.size() == 0 || + default_args.size() == schema.arguments().size() - 1, + "Default values must be specified for none or all arguments"); + + // If there are default args, copy the argument names and default values to + // the function schema. + if (default_args.size() > 0) { + schema = withNewArguments(schema, default_args); + } + + auto wrapped_func = + [func = std::move(func)](jit::Stack& stack) mutable -> void { + // TODO: we need to figure out how to profile calls to custom functions + // like this! Currently can't do it because the profiler stuff is in + // libtorch and not ATen + using RetType = + typename c10::guts::infer_function_traits_t::return_type; + detail::BoxedProxy()(stack, func); + }; + auto method = std::make_unique( + qualMethodName, + std::move(schema), + std::move(wrapped_func), + std::move(doc_string)); + + // Register the method here to keep the Method alive. + // ClassTypes do not hold ownership of their methods (normally it + // those are held by the CompilationUnit), so we need a proxy for + // that behavior here. 
+ auto method_val = method.get(); + classTypePtr->addMethod(method_val); + registerCustomClassMethod(std::move(method)); + return method_val; + } +}; + +/// make_custom_class() is a convenient way to create an instance of a +/// registered custom class and wrap it in an IValue, for example when you want +/// to pass the object to TorchScript. Its syntax is equivalent to APIs like +/// `std::make_shared<>` or `c10::make_intrusive<>`. +/// +/// For example, if you have a custom C++ class that can be constructed from an +/// `int` and `std::string`, you might use this API like so: +/// +/// IValue custom_class_iv = torch::make_custom_class(3, +/// "foobarbaz"); +template +c10::IValue make_custom_class(CtorArgs&&... args) { + auto userClassInstance = + c10::make_intrusive(std::forward(args)...); + return c10::IValue(std::move(userClassInstance)); +} + +// Alternative api for creating a torchbind class over torch::class_ this api is +// preffered to prevent size regressions on Edge usecases. Must be used in +// conjunction with TORCH_SELECTIVE_CLASS macro aka +// selective_class("foo_namespace", TORCH_SELECTIVE_CLASS("foo")) +template +inline class_ selective_class_( + const std::string& namespace_name, + detail::SelectiveStr className) { + auto class_name = std::string(className.operator const char*()); + return torch::class_(namespace_name, class_name); +} + +template +inline detail::ClassNotSelected selective_class_( + const std::string&, + detail::SelectiveStr) { + return detail::ClassNotSelected(); +} + +// jit namespace for backward-compatibility +// We previously defined everything in torch::jit but moved it out to +// better reflect that these features are not limited only to TorchScript +namespace jit { + +using ::torch::class_; +using ::torch::getCustomClass; +using ::torch::init; +using ::torch::isCustomClass; + +} // namespace jit + +template +inline class_ Library::class_(const std::string& className) { + TORCH_CHECK( + kind_ == DEF || kind_ == FRAGMENT, + "class_(\"", + className, + "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. " + "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. " + "(Error occurred at ", + file_, + ":", + line_, + ")"); + TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_); + return torch::class_(*ns_, className); +} + +const std::unordered_set getAllCustomClassesNames(); + +template +inline class_ Library::class_(detail::SelectiveStr className) { + auto class_name = std::string(className.operator const char*()); + TORCH_CHECK( + kind_ == DEF || kind_ == FRAGMENT, + "class_(\"", + class_name, + "\"): Cannot define a class inside of a TORCH_LIBRARY_IMPL block. " + "All class_()s should be placed in the (unique) TORCH_LIBRARY block for their namespace. 
" + "(Error occurred at ", + file_, + ":", + line_, + ")"); + TORCH_INTERNAL_ASSERT(ns_.has_value(), file_, ":", line_); + return torch::class_(*ns_, class_name); +} + +template +inline detail::ClassNotSelected Library::class_(detail::SelectiveStr) { + return detail::ClassNotSelected(); +} + +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h new file mode 100644 index 0000000000000000000000000000000000000000..736d5aacdaa3226e7a247383333823870f978405 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/custom_class_detail.h @@ -0,0 +1,239 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { + +namespace detail { +/** + * In the Facebook internal build (using BUCK), this macro is enabled by + * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer + * binary. + */ +#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE +TORCH_API void record_custom_class(std::string name); + +/** + * Record an instance of a custom class being loaded + * grab portion of string after final '.' from qualified name + * as this seemingly aligns with how users name their custom classes + * example: __torch__.torch.classes.xnnpack.Conv2dOpContext + */ +#define RECORD_CUSTOM_CLASS(NAME) \ + auto name = std::string(NAME); \ + detail::record_custom_class(name.substr(name.find_last_of(".") + 1)); +#else +#define RECORD_CUSTOM_CLASS(NAME) +#endif +} // namespace detail + +/// This struct is used to represent default values for arguments +/// when registering methods for custom classes. +/// static auto register_foo = torch::class_("myclasses", "Foo") +/// .def("myMethod", &Foo::myMethod, {torch::arg("name") = name}); +struct arg { + // Static method for representing a default value of None. This is meant to + // be used like so: + // torch::arg("name") = torch::arg::none + // and is identical to: + // torch::arg("name") = IValue() + static c10::IValue none() { + return c10::IValue(); + } + + // Explicit constructor. + explicit arg(std::string name) + : name_(std::move(name)), value_(c10::nullopt) {} + // Assignment operator. This enables the pybind-like syntax of + // torch::arg("name") = value. + arg& operator=(const c10::IValue& rhs) { + value_ = rhs; + return *this; + } + + // The name of the argument. This is copied to the schema; argument + // names cannot be extracted from the C++ declaration. + std::string name_; + // IValue's default constructor makes it None, which is not distinguishable + // from an actual, user-provided default value that is None. This boolean + // helps distinguish between the two cases. + c10::optional value_; +}; + +namespace detail { + +// Argument type utilities +template +struct types { + using type = types; +}; + +template +struct WrapMethod; + +template +struct WrapMethod { + WrapMethod(R (CurrClass::*m)(Args...)) : m(std::move(m)) {} + + R operator()(c10::intrusive_ptr cur, Args... args) { + return c10::guts::invoke(m, *cur, args...); + } + + R (CurrClass::*m)(Args...); +}; + +template +struct WrapMethod { + WrapMethod(R (CurrClass::*m)(Args...) const) : m(std::move(m)) {} + + R operator()(c10::intrusive_ptr cur, Args... args) { + return c10::guts::invoke(m, *cur, args...); + } + + R (CurrClass::*m)(Args...) 
const; +}; + +// Adapter for different callable types +template < + typename CurClass, + typename Func, + std::enable_if_t< + std::is_member_function_pointer>::value, + bool> = false> +WrapMethod wrap_func(Func f) { + return WrapMethod(std::move(f)); +} + +template < + typename CurClass, + typename Func, + std::enable_if_t< + !std::is_member_function_pointer>::value, + bool> = false> +Func wrap_func(Func f) { + return f; +} + +template < + class Functor, + bool AllowDeprecatedTypes, + size_t... ivalue_arg_indices> +typename c10::guts::infer_function_traits_t::return_type +call_torchbind_method_from_stack( + Functor& functor, + jit::Stack& stack, + std::index_sequence) { + (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would + // be unused and we have to silence the compiler warning. + + constexpr size_t num_ivalue_args = sizeof...(ivalue_arg_indices); + + using IValueArgTypes = + typename c10::guts::infer_function_traits_t::parameter_types; + // TODO We shouldn't use c10::impl stuff directly here. We should use the + // KernelFunction API instead. + return (functor)(c10::impl::ivalue_to_arg< + typename c10::impl::decay_if_not_tensor< + c10::guts::typelist:: + element_t>::type, + AllowDeprecatedTypes>:: + call(torch::jit::peek( + stack, ivalue_arg_indices, num_ivalue_args))...); +} + +template +typename c10::guts::infer_function_traits_t::return_type +call_torchbind_method_from_stack(Functor& functor, jit::Stack& stack) { + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + return call_torchbind_method_from_stack( + functor, stack, std::make_index_sequence()); +} + +template +struct BoxedProxy; + +template +struct BoxedProxy { + void operator()(jit::Stack& stack, Func& func) { + auto retval = call_torchbind_method_from_stack(func, stack); + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + torch::jit::drop(stack, num_ivalue_args); + stack.emplace_back(c10::ivalue::from(std::move(retval))); + } +}; + +template +struct BoxedProxy { + void operator()(jit::Stack& stack, Func& func) { + call_torchbind_method_from_stack(func, stack); + constexpr size_t num_ivalue_args = + c10::guts::infer_function_traits_t::number_of_parameters; + torch::jit::drop(stack, num_ivalue_args); + stack.emplace_back(); + } +}; + +inline bool validIdent(size_t i, char n) { + return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); +} + +inline void checkValidIdent(const std::string& str, const char* type) { + for (const auto i : c10::irange(str.size())) { + TORCH_CHECK( + validIdent(i, str[i]), + type, + " must be a valid Python/C++ identifier." + " Character '", + str[i], + "' at index ", + i, + " is illegal."); + } +} + +class TORCH_API class_base { + protected: + explicit class_base( + const std::string& namespaceName, + const std::string& className, + std::string doc_string, + const std::type_info& intrusivePtrClassTypeid, + const std::type_info& taggedCapsuleClass); + + static c10::FunctionSchema withNewArguments( + const c10::FunctionSchema& schema, + std::initializer_list default_args); + std::string qualClassName; + at::ClassTypePtr classTypePtr; +}; + +} // namespace detail + +TORCH_API void registerCustomClass(at::ClassTypePtr class_type); +TORCH_API void registerCustomClassMethod(std::unique_ptr method); + +// Given a qualified name (e.g. 
__torch__.torch.classes.Foo), return +// the ClassType pointer to the Type that describes that custom class, +// or nullptr if no class by that name was found. +TORCH_API at::ClassTypePtr getCustomClass(const std::string& name); + +// Given an IValue, return true if the object contained in that IValue +// is a custom C++ class, otherwise return false. +TORCH_API bool isCustomClass(const c10::IValue& v); + +// This API is for testing purposes ONLY. It should not be used in +// any load-bearing code. +TORCH_API std::vector customClassSchemasForBCCheck(); + +namespace jit { +using ::torch::registerCustomClass; +using ::torch::registerCustomClassMethod; +} // namespace jit + +} // namespace torch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/extension.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/extension.h new file mode 100644 index 0000000000000000000000000000000000000000..671ae1aadb8d5f2ad33cfe27a8fe1481856e668b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/extension.h @@ -0,0 +1,9 @@ +#pragma once + +#ifndef TORCH_INDUCTOR_CPP_WRAPPER +// All pure C++ headers for the C++ frontend. +#include +#endif + +// Python bindings for the C++ frontend (includes Python.h). +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/library.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/library.h new file mode 100644 index 0000000000000000000000000000000000000000..793c87544233acccb2e08f1626d892896de3b5ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/library.h @@ -0,0 +1,1050 @@ +#pragma once + +/// \file +/// +/// This header provides an API for extending PyTorch's core library +/// of operators with user defined operators and data types. This +/// API can be used in a few ways: +/// +/// * You can define new custom operators and classes with TORCH_LIBRARY(), +/// making them available for use in both eager Python as well as in +/// TorchScript. This API is modeled off of pybind11's `PYBIND11_MODULE` +/// macro, as the provided functionality is similar (pybind11 lets you bind +/// C++ to Python only; `torch/library.h` lets you bind C++ simultaneously to +/// Python and TorchScript). +/// +/// * You can override existing operators with TORCH_LIBRARY_IMPL(), +/// providing a new implementation for these operators for a custom +/// backend (e.g., XLA). When you pass operators with tensors of your custom +/// backend, your overridden implementations will be called instead +/// of the standard implementations. +/// +/// * You can use both capabilities at the same time, allowing you +/// to write custom operators that register CPU/CUDA/Autograd +/// implementations without having to write the boilerplate +/// conditionals yourself. +/// +/// For a tutorial style introduction to the library API, check +/// out the [Extending TorchScript with Custom C++ +/// Operators](https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html) +/// tutorial. +/// +/// ``` +/// // Define a library whose operators live in the namespace 'myops'. +/// // You must define all of the operators for this library in +/// // this namespace. +/// TORCH_LIBRARY(myops, m) { +/// // Define a operator with exactly one implementation for all backends. 
+/// m.def("add(Tensor self, Tensor other) -> Tensor", &add_impl); +/// +/// // Define a schema for an operator, but provide no implementation +/// // (use this syntax if you want to use the dispatcher) +/// m.def("mul(Tensor self, Tensor other) -> Tensor"); +/// +/// // Provide an implementation for a defined operator (you can +/// // provide multiple; one per backend). The dispatcher takes care of +/// // calling the correct implementation depending on if we get a CPU +/// // tensor or a CUDA tensor +/// m.impl("mul", torch::kCPU, &mul_cpu_impl); +/// m.impl("mul", torch::kCUDA, &mul_cuda_impl); +/// } +/// +/// // Define implementations for operators for a non-standard backend, +/// // e.g., XLA (valid values are entries of DispatchKey). This can +/// // be used to define operators in a different file than the initial +/// // TORCH_LIBRARY definition (e.g., if it is in an external library) +/// TORCH_LIBRARY_IMPL(myops, XLA, m) { +/// m.impl("mul", &mul_xla_impl); +/// } +/// ``` + +#include +#include +#include +#include +#include + +// Just for inferFunctionSchemaFromFunctor +#include +#include + +namespace torch { + +#if defined C10_MOBILE +/** + * The NoInferSchemaTag is a type name used to indicate that this call to the + * CppFunction constructor should not trigger schema inference from functor. + * Schema inference from functor utilizes template meta-programming, and is + * costly from a size perspective. Ideally, one would expect that the schema + * inference would require very little binary size since most of the + * computation can be done by the compiler at build time, but that isn't + * necessarily the case. + * + * Schema inference is elided only for mobile use-cases where we don't need + * the additional runtime cost or size overhead on client devices. + * + */ +struct NoInferSchemaTag {}; +#endif + +#define HAS_PT2_COMPLIANT_TAG + +// For multipy/torchdeploy use case +enum class _RegisterOrVerify { REGISTER, VERIFY }; + +template +class class_; + +#define HAS_IMPL_ABSTRACT_PYSTUB + +/// Represents a C++ function that implements an operator. Most users won't +/// interact directly with this class, except via error messages: the +/// constructors this function define the set of permissible "function"-like +/// things you can bind via the interface. +/// +/// This class erases the type of the passed in function, but durably records +/// the type via an inferred schema for the function. +class TORCH_API CppFunction final { + // TODO: This is morally the same thing as KernelRegistrationConfig, but it's + // opaque to the user. 
+ + public: + /// This overload accepts function pointers, e.g., `CppFunction(&add_impl)` + template + explicit CppFunction( + Func* f, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)), + cpp_signature_(c10::impl::CppSignature::make()), + schema_( + c10::detail::inferFunctionSchemaFromFunctor>()), + debug_() {} + + /// This overload accepts compile time function pointers, e.g., + /// `CppFunction(TORCH_FN(add_impl))` + template + explicit CppFunction( + FuncPtr f, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedFunction(f)), + cpp_signature_( + c10::impl::CppSignature::make()), + schema_(c10::detail::inferFunctionSchemaFromFunctor< + typename FuncPtr::FuncType>()), + debug_() {} + + /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) { + /// ... })` + template + explicit CppFunction( + Lambda&& f, + std::enable_if_t< + c10::guts::is_functor>::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedLambda( + std::forward(f))), + cpp_signature_(c10::impl::CppSignature::make()), + schema_(c10::detail::inferFunctionSchemaFromFunctor< + std::decay_t>()), + debug_() {} + +#if defined C10_MOBILE + /// This overload accepts function pointers, e.g., `CppFunction(&add_impl, + /// NoInferSchemaTag())` + template + explicit CppFunction( + Func* f, + NoInferSchemaTag, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)), + cpp_signature_(c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} + + /// This overload accepts compile time function pointers, e.g., + /// `CppFunction(TORCH_FN(add_impl), NoInferSchemaTag())` + template + explicit CppFunction( + FuncPtr f, + NoInferSchemaTag, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedFunction(f)), + cpp_signature_( + c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} + + /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) { + /// ... }. NoInferSchemaTag())` + template + explicit CppFunction( + Lambda&& f, + NoInferSchemaTag, + std::enable_if_t< + c10::guts::is_functor>::value, + std::nullptr_t> = nullptr) + : func_(c10::KernelFunction::makeFromUnboxedLambda( + std::forward(f))), + cpp_signature_(c10::impl::CppSignature::make()) + // TODO: Don't go through WrapRuntimeKernelFunctor + , + schema_(nullptr), + debug_() {} +#endif + + ~CppFunction(); + + CppFunction(CppFunction&&) noexcept = default; + + CppFunction& operator=(CppFunction&&) = default; + + /// \private + /// Creates a function from a type-erased boxed kernel. + static CppFunction makeFromBoxedKernel(c10::BoxedKernel kernel) { + return CppFunction( + c10::KernelFunction::makeFromBoxedKernel(std::move(kernel)), + /* cpp_signature */ c10::nullopt, // not known for boxed functions + /* schema */ nullptr); + } + + /// This creates a fallthrough function. Fallthrough functions + /// immediately redispatch to the next available dispatch key, + /// but are implemented more efficiently than a hand written + /// function done in the same way. 
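A typical way to use a fallthrough is as a backend fallback, so that every operator skips a dispatch key unless an explicit kernel has been registered for it there. A hedged sketch (the AutocastCPU key is only an example of a key this pattern is commonly applied to):

#include <torch/library.h>

// Install a fallthrough as the fallback for all namespaces ("_") at one
// dispatch key; operators without a specific kernel at this key simply
// redispatch to the next key in line.
TORCH_LIBRARY_IMPL(_, AutocastCPU, m) {
  m.fallback(torch::CppFunction::makeFallthrough());
}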
+ static CppFunction makeFallthrough() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFallthrough()); + } + + /// \private + /// + /// Creates a function that raises an error saying that named tensors + /// are not supported when called. + static CppFunction makeNamedNotSupported() { + return makeFromBoxedKernel(c10::BoxedKernel::makeNamedNotSupported()); + } + + /// Create a function from a boxed kernel function with signature + /// `void(const OperatorHandle&, Stack*)`; i.e., they receive a + /// stack of arguments in a boxed calling convention, rather than + /// in the native C++ calling convention. Boxed functions are + /// typically only used to register backend fallbacks via + /// torch::Library::fallback(). + template + static CppFunction makeFromBoxedFunction() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction()); + } + + // Variant that takes in a boxed kernel function with a plumbed + // DispatchKeySet. See Note [Plumbing Keys Through The Dispatcher] for + // details. + template + static CppFunction makeFromBoxedFunction() { + return makeFromBoxedKernel(c10::BoxedKernel::makeFromFunction()); + } + + /// Create a function from a boxed kernel functor which defines + /// `operator()(const OperatorHandle&, DispatchKeySet, Stack*)` + /// (receiving arguments from boxed calling convention) and inherits + /// from `c10::OperatorKernel`. Unlike makeFromBoxedFunction, functions + /// registered in this way can also carry additional state which + /// is managed by the functor; this is useful if you're writing an + /// adapter to some other implementation, e.g., a Python callable, which + /// is dynamically associated with the registered kernel. + template + static CppFunction makeFromBoxedFunctor( + std::unique_ptr kernelFunctor) { + return makeFromBoxedKernel( + c10::BoxedKernel::makeFromFunctor(std::move(kernelFunctor))); + } + + /// Create a function from an unboxed kernel function. + /// This is typically used to register common operators. + template < + typename FuncPtr, + std::enable_if_t< + c10::guts::is_function_type::value, + std::nullptr_t> = nullptr> + static CppFunction makeFromUnboxedFunction(FuncPtr* f) { + return CppFunction(f); + } + + /// Create a function from a compile time unboxed kernel function pointer. + /// This is typically used to register common operators. + /// Compile time function pointers can be used to allow the compiler + /// to optimize (e.g. inline) calls to it. + template < + typename FuncPtr, + std::enable_if_t< + c10::is_compile_time_function_pointer::value, + std::nullptr_t> = nullptr> + static CppFunction makeFromUnboxedFunction(FuncPtr f) { + return CppFunction(f); + } + + CppFunction&& debug(std::string d) && { + debug_ = std::move(d); + return std::move(*this); + } + + private: + c10::optional dispatch_key_; + c10::KernelFunction func_; + c10::optional cpp_signature_; + std::unique_ptr schema_; + std::string debug_; + + // The "setter" for dispatch_key_ + template + friend CppFunction dispatch(c10::DispatchKey, Func&&); + + // The only class which actually pulls out values from CppFunction (does so + // destructively, felt too lazy to write accessors that I don't even + // want users to use) + friend class Library; + + CppFunction( + c10::KernelFunction func, + c10::optional cpp_signature, + std::unique_ptr schema); +}; + +/// \defgroup torch-dispatch-overloads torch::dispatch overloads + +/// Create a torch::CppFunction which is associated with a specific +/// dispatch key. 
torch::CppFunctions that are tagged with a +/// c10::DispatchKey don't get invoked unless the dispatcher determines +/// that this particular c10::DispatchKey is the one that should be +/// dispatched to. +/// +/// This function is generally not used directly, instead, prefer using +/// TORCH_LIBRARY_IMPL(), which will implicitly set the c10::DispatchKey +/// for all registration calls inside of its body. +/// +/// \ingroup torch-dispatch-overloads +template +inline CppFunction dispatch(c10::DispatchKey k, Func&& raw_f) { + CppFunction f(std::forward(raw_f)); + if (k == c10::DispatchKey::CatchAll) { + f.dispatch_key_ = c10::nullopt; + } else { + f.dispatch_key_ = k; + } + return f; +} + +/// Convenience overload of dispatch() which accepts c10::DeviceType +/// +/// \ingroup torch-dispatch-overloads +template +inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) { + auto deviceTypeToDispatchKey = [](c10::DeviceType t) { + switch (t) { + // This list is synchronized with the k-constants in c10/core/DeviceType.h + case c10::DeviceType::CPU: + return c10::DispatchKey::CPU; + case c10::DeviceType::CUDA: + return c10::DispatchKey::CUDA; + case c10::DeviceType::IPU: + return c10::DispatchKey::IPU; + case c10::DeviceType::XLA: + return c10::DispatchKey::XLA; + case c10::DeviceType::Lazy: + return c10::DispatchKey::Lazy; + case c10::DeviceType::XPU: + return c10::DispatchKey::XPU; + case c10::DeviceType::MPS: + return c10::DispatchKey::MPS; + case c10::DeviceType::Meta: + return c10::DispatchKey::Meta; + case c10::DeviceType::HIP: + return c10::DispatchKey::HIP; + case c10::DeviceType::ORT: + return c10::DispatchKey::ORT; + case c10::DeviceType::HPU: + return c10::DispatchKey::HPU; + case c10::DeviceType::MTIA: + return c10::DispatchKey::MTIA; + case c10::DeviceType::PrivateUse1: + return c10::DispatchKey::PrivateUse1; + default: + TORCH_CHECK( + false, + "Device type ", + t, + " cannot be overloaded at dispatch time, " + "please file a bug report explaining what you were trying to do."); + } + }; + return dispatch(deviceTypeToDispatchKey(type), std::forward(raw_f)); +} + +/// \defgroup torch-schema-overloads torch::schema overloads + +/// Construct a c10::FunctionSchema from a string, with an explicitly +/// specified c10::AliasAnalysisKind. Ordinarily, schemas are simply +/// passed in as strings, but if you need to specify a custom alias +/// analysis, you can replace the string with a call to this function. +/// +/// ``` +/// // Default alias analysis (FROM_SCHEMA) +/// m.def("def3(Tensor self) -> Tensor"); +/// // Pure function alias analysis +/// m.def(torch::schema("def3(Tensor self) -> Tensor", +/// c10::AliasAnalysisKind::PURE_FUNCTION)); +/// ``` +/// +/// \ingroup torch-schema-overloads +inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) { + c10::FunctionSchema s = torch::jit::parseSchema(str); + s.setAliasAnalysis(k); + return s; +} + +/// Function schemas can be directly constructed from string literals. +/// +/// \ingroup torch-schema-overloads +inline c10::FunctionSchema schema(const char* s) { + return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA); +} + +/// \private +/// +/// Already constructed function schemas are accepted if they are +/// rvalues. 
+/// +/// \ingroup torch-schema-overloads +inline c10::FunctionSchema&& schema(c10::FunctionSchema&& s) { + return std::move(s); +} + +namespace detail { + +inline std::variant constructSchemaOrName( + c10::FunctionSchema&& s) { + return std::move(s); +} +inline std::variant constructSchemaOrName( + c10::OperatorName&& n) { + return std::move(n); +} +inline std::variant +constructSchemaOrName(const char* str) { + auto s = torch::jit::parseSchemaOrName(str); + if (std::holds_alternative(s)) { + std::get(s).setAliasAnalysis( + c10::AliasAnalysisKind::FROM_SCHEMA); + } + return s; +} + +class TorchLibraryInit; + +} // namespace detail + +// Note [Selective build] +// ~~~~~~~~~~~~~~~~~~~~~~ +// In some settings, especially mobile, it is important to avoid compiling any +// references to functions that you aren't actually going to use, so that they +// can be eliminated by the linker. We call this capability "selective build". +// +// A very easy way to implement selective build which results in a lot of +// boilerplate is to just add ifdef's around every registration call, but this +// means you have to write a lot of extra lines of code at every registration +// site, and it also means you have to define some munging scheme to map +// operators to macros. +// +// Instead of doing this, we have a different mechanism centered around the +// concept of a SelectiveStr. A selective name is like a const char* string, +// except it also carries at compile time a boolean saying whether or not a +// registration should actually happen or not. We then have extra overloads +// which bypass registration entirely if a selective name is disabled. We do a +// constexpr test to see if a operator should be enabled or not; this is +// currently implemented in ATen/core/op_registration/op_allowlist.h + +namespace detail { + +// dummy class for non selected custom torchbind classes +class ClassNotSelected { + public: + ClassNotSelected& def_pickle(...) { + return *this; + } + ClassNotSelected& def(...) { + return *this; + } +}; + +// A SelectiveStr is like a const char*, except that it also comes +// with a type brand that says whether or not the name is enabled or +// not. If the string is disabled, then (at compile time) we DON'T generate +// a registration call for it. This class is not intended to be called +// directly; use TORCH_SELECTIVE_NAME or TORCH_SELECTIVE_SCHEMA macros below +// to create it. +template +class SelectiveStr { + public: + constexpr explicit SelectiveStr(const char* name) : name_(name) {} + constexpr operator const char*() { + return name_; + } + + private: + const char* name_; +}; + +#define TORCH_SELECTIVE_CLASS(n) \ + torch::detail::SelectiveStr(n) +#define TORCH_SELECTIVE_NAME(n) \ + torch::detail::SelectiveStr(n) +#define TORCH_SELECTIVE_SCHEMA(n) \ + torch::detail::SelectiveStr(n) + +} // namespace detail + +/// This object provides the API for defining operators and providing +/// implementations at dispatch keys. Typically, a torch::Library +/// is not allocated directly; instead it is created by the +/// TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() macros. +/// +/// Most methods on torch::Library return a reference to itself, +/// supporting method chaining. +/// +/// ``` +/// // Examples: +/// +/// TORCH_LIBRARY(torchvision, m) { +/// // m is a torch::Library +/// m.def("roi_align", ...); +/// ... +/// } +/// +/// TORCH_LIBRARY_IMPL(aten, XLA, m) { +/// // m is a torch::Library +/// m.impl("add", ...); +/// ... 
+/// } +/// ``` +/// +class TORCH_API Library final { + public: + /// \private + /// + /// Which type of macro produced this Library + enum Kind { + DEF, // from TORCH_LIBRARY (no qualifier) + IMPL, + FRAGMENT, + }; + + /// \private + /// + /// Use TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() instead of using these + /// constructors directly + Library( + Kind kind, + std::string ns, + c10::optional k, + const char* file, + uint32_t line); + + Library(const Library&) = delete; + Library& operator=(const Library&) = delete; + Library(Library&&) = default; + Library& operator=(Library&&) = default; + + // Some notes about the API design here. We had the following constraints: + // + // - We need to support multiple "types" of arguments for schema and + // functions (e.g., unnamed lambda types, regular functions, const char*, + // fully instantiated schemas) + // - We don't want to write exponentially many overloads + // - We don't want to rely on implicit conversion to a common type, + // because the C++ compiler will only be willing to do a single + // implicit conversion (reducing the set of valid types which you + // can invoke with); also error messages are worse when an implicit + // conversion is not selected (as the compiler will not explain + // why it didn't select an implicit conversion; this is different + // from overloads where it will explain each candidate overload and + // why it didn't apply) + // + // To solve all of these constraints at the same time, we use a trick taken + // from the pybind11 library: template over the argument in the user visible + // API, and inside of the templated function explicitly call an overloaded + // function to resolve the argument to a real type. You get the good error + // messages from overloads, but at the same time you only need to write the + // overload for any given argument type once. + + /// Declare an operator with a schema, but don't provide any implementations + /// for it. You're expected to then provide implementations using the + /// impl() method. All template arguments are inferred. + /// + /// \param raw_schema The schema of the operator to be defined. + /// Typically, this is a `const char*` string literal, but any type + /// accepted by torch::schema() is accepted here. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY(myops, m) { + /// m.def("add(Tensor self, Tensor other) -> Tensor"); + /// } + /// ``` + + template + Library& def( + Schema&& raw_schema, + const std::vector& tags = {}, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & { + c10::FunctionSchema s = schema(std::forward(raw_schema)); + return _def(std::move(s), nullptr, tags, rv); + } + + /// Declares that for all operators that are subsequently def'ed, their + /// abstract impls may be found in the given Python module (pymodule). + /// This registers some help text that is used if the abstract impl + /// cannot be found. + /// + /// Args: + /// - pymodule: the python module + /// - context: We may include this in the error message. + Library& impl_abstract_pystub(const char* pymodule, const char* context = "") { + impl_abstract_pystub_ = {pymodule, context}; + return *this; + } + + /// Define an operator for a schema and then register an implementation for + /// it. This is typically what you would use if you aren't planning + /// on making use of the dispatcher to structure your operator + /// implementation. 
It's roughly equivalent to calling def() and + /// then impl(), but if you omit the schema of the operator, we will + /// infer it from the type of your C++ function. All template + /// arguments are inferred. + /// + /// \param raw_name_or_schema The schema of the operator to be + /// defined, or just the name of the operator if the schema is to be + /// inferred from `raw_f`. Typically a `const char*` literal. + /// \param raw_f The C++ function that implements this operator. + /// Any valid constructor of torch::CppFunction is accepted here; + /// typically you provide a function pointer or lambda. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY(myops, m) { + /// m.def("add", add_fn); + /// } + /// ``` + template + Library& def(NameOrSchema&& raw_name_or_schema, Func&& raw_f, + const std::vector& tags = {}) & { + CppFunction f(std::forward(raw_f)); + return _def( + detail::constructSchemaOrName( + ::std::forward(raw_name_or_schema)), + ::std::move(f), tags); + } + + /// Register an implementation for an operator. You may register multiple + /// implementations for a single operator at different dispatch keys + /// (see torch::dispatch()). Implementations must have a corresponding + /// declaration (from def()), otherwise they are invalid. If you plan + /// to register multiple implementations, DO NOT provide a function + /// implementation when you def() the operator. + /// + /// \param name The name of the operator to implement. Do NOT provide + /// schema here. + /// \param raw_f The C++ function that implements this operator. Any + /// valid constructor of torch::CppFunction is accepted here; + /// typically you provide a function pointer or lambda. + /// + /// ``` + /// // Example: + /// TORCH_LIBRARY_IMPL(myops, CUDA, m) { + /// m.impl("add", add_cuda); + /// } + /// ``` + template + Library& impl( + Name name, + Func&& raw_f, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) & { + // TODO: need to raise an error when you impl a function that has a + // catch all def +#if defined C10_MOBILE + CppFunction f(std::forward(raw_f), NoInferSchemaTag()); +#else + CppFunction f(std::forward(raw_f)); +#endif + return _impl(name, std::move(f), rv); + } + +#if defined C10_MOBILE + // Note: This overload is needed only for C10_MOBILE, since the automatically + // defined copy constructor for the CppFunction doesn't have the additional + // NoInferSchemaTag argument. We define the overload for the impl() function + // to accept a CppFunction&& argument. The already constructed CppFunction + // object may or may not have the inferred schema, but it doesn't matter + // for our purposes since if it already has the inferred schema, then we + // might as well just pass it through directly. + // + template + Library& impl(Name name, CppFunction&& raw_f) & { + // TODO: need to raise an error when you impl a function that has a + // catch all def + CppFunction f(std::forward(raw_f)); + return _impl(name, std::move(f)); + } +#endif + + // Helper for getting an OperatorName for a const char*. You probably + // don't need this. + c10::OperatorName _resolve(const char* name) const; + + /// \private + /// + /// Convenience overload for directly specifying the dispatch key when + /// impl(). 
You probably don't need this; instead, prefer specifying + /// the dispatch key for the entire block in TORCH_LIBRARY_IMPL() + template + Library& impl(Name name, Dispatch&& key, Func&& raw_f) & { + return impl( + name, dispatch(std::forward(key), std::forward(raw_f))); + } + + template + Library& impl_UNBOXED(Name /*name*/, Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + // These overloads cover cases when a SelectiveStr (see Note [Selective + // build]) has been disabled at compile time. In that case, don't generate + // any code referencing the passed in functions at all. + Library& def(detail::SelectiveStr, const std::vector& tags = {}) & { + return *this; + } + Library& def(detail::SelectiveStr raw_schema, const std::vector& tags = {}) & { + return def(raw_schema.operator const char*(), tags); + } + template + Library& def(detail::SelectiveStr, Func&& /*raw_f*/, const std::vector& tags = {}) & { + return *this; + } + template + Library& def(detail::SelectiveStr raw_name_or_schema, Func&& raw_f, const std::vector& tags = {}) & { + return def( + raw_name_or_schema.operator const char*(), std::forward(raw_f), tags); + } + + template + Library& impl(detail::SelectiveStr, Func&& /*raw_f*/) & { + return *this; + } + template + Library& impl( + detail::SelectiveStr, + Dispatch&& /*key*/, + Func&& /*raw_f*/) & { + return *this; + } + template + Library& impl_UNBOXED( + detail::SelectiveStr /*name*/, + Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + template + Library& impl(detail::SelectiveStr name, Func&& raw_f) & { + return impl(name.operator const char*(), std::forward(raw_f)); + } + template + Library& impl( + detail::SelectiveStr name, + Dispatch&& key, + Func&& raw_f) & { + return impl( + name.operator const char*(), + std::forward(key), + std::forward(raw_f)); + } + template + Library& impl_UNBOXED( + detail::SelectiveStr /*name*/, + Func* /*raw_f*/) & { + static_assert( + c10::guts::false_t(), + ".impl_UNBOXED(...) was removed. Please use .impl(...) instead."); + return *this; + } + + /// Register a fallback implementation for all operators which will be used + /// if there is not a specific implementation for an operator available. + /// There MUST be a DispatchKey associated with a fallback; e.g., + /// only call this from TORCH_LIBRARY_IMPL() with namespace `_`. + /// + /// \param raw_f The function that implements the fallback. Unboxed + /// functions typically do not work as fallback functions, as + /// fallback functions must work for every operator (even though + /// they have varying type signatures). 
Typical arguments are + /// CppFunction::makeFallthrough() or + /// CppFunction::makeFromBoxedFunction() + /// + /// ``` + /// // Example: + /// + /// TORCH_LIBRARY_IMPL(_, AutogradXLA, m) { + /// // If there is not a kernel explicitly registered + /// // for AutogradXLA, fallthrough to the next + /// // available kernel + /// m.fallback(torch::CppFunction::makeFallthrough()); + /// } + /// + /// // See aten/src/ATen/core/dispatch/backend_fallback_test.cpp + /// // for a full example of boxed fallback + /// ``` + template + Library& fallback(Func&& raw_f) & { + CppFunction f((std::forward(raw_f))); + return _fallback(std::move(f)); + } + + template + inline torch::class_ class_(const std::string& className); + + // These overloads enable the use of selective build on classes registered + // within a library. The API is the same as before with 1 minor change. + // Instead of m.class_("foo") you instead do + // m.class_(TORCH_SELECTIVE_CLASS("foo")) + template + inline torch::class_ class_(detail::SelectiveStr className); + + template + inline detail::ClassNotSelected class_(detail::SelectiveStr className); + + // De-registers all registrations created with this Library + void reset(); + + private: + Kind kind_; + c10::optional ns_; + c10::optional dispatch_key_; + c10::optional> impl_abstract_pystub_; + const char* file_; + uint32_t line_; + + std::vector registrars_; + + friend class detail::TorchLibraryInit; + + // Non-user visible actual implementations of functions. These aren't + // public because we only implement & qualifier and not && qualifier + Library& _def( + c10::FunctionSchema&& schema, + c10::OperatorName* out_name = nullptr, + const std::vector& tags = {}, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &; + Library& _def( + std::variant&&, + CppFunction&& f, + const std::vector& tags = {}) &; + Library& _impl( + const char* name, + CppFunction&& f, + _RegisterOrVerify rv = _RegisterOrVerify::REGISTER) &; + Library& _fallback(CppFunction&& f) &; + + at::OperatorName _parseNameForLib(const char* name_str) const; +}; + +namespace detail { + +class TorchLibraryInit final { + private: + using InitFn = void(Library&); + Library lib_; + + public: + TorchLibraryInit( + Library::Kind kind, + InitFn* fn, + const char* ns, + c10::optional k, + const char* file, + uint32_t line) + : lib_(kind, ns, k, file, line) { + fn(lib_); + } +}; + +} // namespace detail + +} // namespace torch + +// NB: The EXACT NAMING of the initializer functions (e.g., +// TORCH_LIBRARY_init_aten) matters for the code analyzer; +// see the regexes at tools/code_analyzer/run_analyzer.sh + +/// Macro for defining a function that will be run at static +/// initialization time to define a library of operators in the +/// namespace `ns` (must be a valid C++ identifier, no quotes). +/// Use this macro when you want to define a new set of custom operators +/// that do not already exist in PyTorch. +/// +/// Example usage: +/// +/// ``` +/// TORCH_LIBRARY(myops, m) { +/// // m is a torch::Library; methods on it will define +/// // operators in the myops namespace +/// m.def("add", add_impl); +/// } +/// ``` +/// +/// The `m` argument is bound to a torch::Library that is used to +/// register operators. There may only be one TORCH_LIBRARY() +/// for any given namespace. 
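+///
+/// As a sketch (the `myadd` schema and the `myadd_cpu` function are
+/// hypothetical), a TORCH_LIBRARY() block may also declare schemas only and
+/// leave the implementations to separate TORCH_LIBRARY_IMPL() blocks:
+///
+/// ```
+/// TORCH_LIBRARY(myops, m) {
+///   m.def("myadd(Tensor self, Tensor other) -> Tensor");
+/// }
+///
+/// TORCH_LIBRARY_IMPL(myops, CPU, m) {
+///   m.impl("myadd", myadd_cpu);
+/// }
+/// ```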
+#define TORCH_LIBRARY(ns, m) \ + static void TORCH_LIBRARY_init_##ns(torch::Library&); \ + static const torch::detail::TorchLibraryInit TORCH_LIBRARY_static_init_##ns( \ + torch::Library::DEF, \ + &TORCH_LIBRARY_init_##ns, \ + #ns, \ + c10::nullopt, \ + __FILE__, \ + __LINE__); \ + void TORCH_LIBRARY_init_##ns(torch::Library& m) + +/// \private +/// +/// This macro is a version of TORCH_LIBRARY() that doesn't enforce that there +/// is only one library (it is a "fragment"). This is used inside the +/// PerOpRegistration.cpp file, as well as in places where all op registrations +/// within the same namespace cannot be easily put into one macro block +/// (this is mostly the case for custom ops in fbcode that were ported from +/// the old API) +#define TORCH_LIBRARY_FRAGMENT(ns, m) _TORCH_LIBRARY_FRAGMENT(ns, m, C10_UID) + +/// \private +/// +/// The above macro requires an extra unique identifier (uid) to prevent +/// variable name collisions This can happen if TORCH_LIBRARY_FRAGMENT is called +/// multiple times with the same namespace in the same translation unit. Note +/// that the TORCH_LIBRARY variant doesn't run into this problem, because it +/// enforces that it can only be called once for a given namespace. +#define _TORCH_LIBRARY_FRAGMENT(ns, m, uid) \ + static void C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library&); \ + static const torch::detail::TorchLibraryInit C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_static_init_##ns##_, uid)( \ + torch::Library::FRAGMENT, \ + &C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), \ + #ns, \ + c10::nullopt, \ + __FILE__, \ + __LINE__); \ + void C10_CONCATENATE( \ + TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library & m) + +/// Macro for defining a function that will be run at static +/// initialization time to define operator overrides for dispatch key +/// `k` (must be an unqualified enum member of c10::DispatchKey) in +/// namespace `ns` (must be a valid C++ identifer, no quotes). Use this +/// macro when you want to implement a preexisting set of custom +/// operators on a new dispatch key (e.g., you want to provide CUDA +/// implementations of already existing operators). One common usage +/// pattern is to use TORCH_LIBRARY() to define schema for all new +/// operators you want to define, and then use several +/// TORCH_LIBRARY_IMPL() blocks to provide implementations of the +/// operator for CPU, CUDA and Autograd. +/// +/// In some cases, you need to define something that applies to all namespaces, +/// not just one namespace (usually a fallback). In that case, use the reserved +/// namespace _, e.g., +/// +/// ``` +/// TORCH_LIBRARY_IMPL(_, XLA, m) { +/// m.fallback(xla_fallback); +/// } +/// ``` +/// +/// Example usage: +/// +/// ``` +/// TORCH_LIBRARY_IMPL(myops, CPU, m) { +/// // m is a torch::Library; methods on it will define +/// // CPU implementations of operators in the myops namespace. +/// // It is NOT valid to call torch::Library::def() +/// // in this context. +/// m.impl("add", add_cpu_impl); +/// } +/// ``` +/// +/// If ``add_cpu_impl`` is an overloaded function, use a +/// ``static_cast`` to specify which overload you want +/// (by providing the full type). +/// +// NB: if the dispatch key is not whitelisted, we simply omit the Library +// call entirely +#define TORCH_LIBRARY_IMPL(ns, k, m) _TORCH_LIBRARY_IMPL(ns, k, m, C10_UID) + +/// \private +/// +/// The above macro requires an extra unique identifier (uid) to prevent +/// variable name collisions. 
This can happen if TORCH_LIBRARY_IMPL is called +/// multiple times with the same namespace and dispatch key in the same +/// translation unit. +#define _TORCH_LIBRARY_IMPL(ns, k, m, uid) \ + static void C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library&); \ + static const torch::detail::TorchLibraryInit C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( \ + torch::Library::IMPL, \ + (c10::impl::dispatch_key_allowlist_check(c10::DispatchKey::k) \ + ? &C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid) \ + : [](torch::Library&) -> void {}), \ + #ns, \ + c10::make_optional(c10::DispatchKey::k), \ + __FILE__, \ + __LINE__); \ + void C10_CONCATENATE( \ + TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library & m) + +// These are variants of the macros above which are to be used for testing (they +// don't setup the static initializer, so you can control the visibility of +// the allocated library yourself). +// +// DO NOT use these in production code, they are NOT understood by the +// code analyzer and will be incorrectly analyzed in those situations. + +/// \private +#define MAKE_TORCH_LIBRARY(ns) \ + torch::Library(torch::Library::DEF, #ns, c10::nullopt, __FILE__, __LINE__) +/// \private +#define MAKE_TORCH_LIBRARY_IMPL(ns, k) \ + torch::Library( \ + torch::Library::IMPL, \ + #ns, \ + c10::make_optional(c10::DispatchKey::k), \ + __FILE__, \ + __LINE__) + +// Make the custom class API visible, so it is available from +// torch::Library. + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/torch/script.h b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/script.h new file mode 100644 index 0000000000000000000000000000000000000000..58510670613b58ec9b39f3d69d652be6cc0ce998 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/torch/script.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/xnnpack.h b/llmeval-env/lib/python3.10/site-packages/torch/include/xnnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..e71be0fd57ffc1ef2cc67b2fc8fb20fc4288a1d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/xnnpack.h @@ -0,0 +1,6172 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// Copyright 2019 Google LLC +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/// The number of bytes XNNPACK may read beyond array bounds. +/// The caller must allocate at least this many extra bytes after the tensor data passed to XNNPACK. +/// +/// Note: XNNPACK reads, but never writes beyond array bounds. +#define XNN_EXTRA_BYTES 16 + +/// Maximum number of dimensions in tensor shape. +#define XNN_MAX_TENSOR_DIMS 6 + +/// Allow sparse inference in a Runtime. +/// +/// Note: this flag hints XNNPACK to consider sparse inference, but does not guarantee it. +#define XNN_FLAG_HINT_SPARSE_INFERENCE 0x00000001 + +/// Allow IEEE FP16 inference in a Runtime. +/// +/// Note: this flag hints XNNPACK to consider IEEE FP16 inference, but does not guarantee it. 
+#define XNN_FLAG_HINT_FP16_INFERENCE 0x00000002 + +/// Force IEEE FP16 inference in a Runtime, and fail if FP16 inference is not possible. +/// +/// Note: this flag guarantees that XNNPACK will use IEEE FP16 inference, or fail to create the Runtime object. +/// Warning: on x86 systems FP16 computations will be emulated at a substantial performance cost. +#define XNN_FLAG_FORCE_FP16_INFERENCE 0x00000004 + +/// Enable timing of each operator's runtime. +#define XNN_FLAG_BASIC_PROFILING 0x00000008 + +/// Enable the just-in-time compiler. +#define XNN_FLAG_JIT 0x00000010 + +/// The convolution operator represents a depthwise convolution, and use HWGo layout for filters. +#define XNN_FLAG_DEPTHWISE_CONVOLUTION 0x00000001 + +/// Assume transposed weights in a fully connected operator. +#define XNN_FLAG_TRANSPOSE_WEIGHTS 0x00000001 + +/// The operator assumes NHWC layout for the input, regardless of the output layout. +#define XNN_FLAG_INPUT_NHWC 0x00000002 + +/// Match "SAME" padding in TensorFlow. Exact padding values are computed dynamically depending on input size. +#define XNN_FLAG_TENSORFLOW_SAME_PADDING 0x00000004 + +/// Assume transposed weights in a batch matrix multiply operator. +#define XNN_FLAG_TRANSPOSE_B XNN_FLAG_TRANSPOSE_WEIGHTS + +/// Assume transposed input in a batch matrix multiply operator. +#define XNN_FLAG_TRANSPOSE_A 0x00000002 + +/// Implicitly flatten and reshape input of a Fully Connected operator into a 2D tensor. +#define XNN_FLAG_TENSORFLOW_RESHAPE_2D 0x00000004 + +/// Match behaviour of TensorFlow 1.x. +#define XNN_FLAG_TENSORFLOW_LEGACY_MODE 0x00000004 + +/// Static weights of the FP16 operator are in FP32 format. +#define XNN_FLAG_FP32_STATIC_WEIGHTS 0x00000008 + +/// Align corners of input and output images in resize operations. +#define XNN_FLAG_ALIGN_CORNERS 0x00000008 + +/// Yield worker threads of the thread pool to the system scheduler after the inference. +#define XNN_FLAG_YIELD_WORKERS 0x00000010 + +/// Use transient indirection buffer to reduce memory footprint +#define XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER 0x00000020 + +/// Reduce the dimensions. +#define XNN_FLAG_REDUCE_DIMS 0x00000040 + +/// The number of entries in an array of xnn_dynamic_quantization_params that XNNPACK may read beyond array bounds. +/// The caller must allocate at least this many extra xnn_dynamic_quantization_params before passing the array to XNNPACK. +/// +/// Note: XNNPACK reads, but never writes beyond array bounds. +#define XNN_EXTRA_QUANTIZATION_PARAMS 8 + +struct xnn_dynamic_quantization_params { + int32_t zero_point; + float scale; +}; + +/// Status code for any XNNPACK function call. +enum xnn_status { + /// The call succeeded, and all output arguments now contain valid data. + xnn_status_success = 0, + xnn_status_uninitialized = 1, + xnn_status_invalid_parameter = 2, + xnn_status_invalid_state = 3, + xnn_status_unsupported_parameter = 4, + xnn_status_unsupported_hardware = 5, + xnn_status_out_of_memory = 6, + xnn_status_reallocation_required = 7, +}; + +struct xnn_allocator { + /// User-specified pointer that will be passed as-is to all functions in this structure. + void* context; + /// Pointer to a function to be called for general memory allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param size - The size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the allocated memory block of at least @ref size bytes. + /// If allocation fails, the function must return NULL. 
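+  ///
+  /// For example, a minimal implementation could simply forward to the C runtime
+  /// (a sketch; the context argument is unused here and <stdlib.h> is assumed):
+  ///
+  ///   static void* my_allocate(void* context, size_t size) {
+  ///     (void) context;
+  ///     return malloc(size);
+  ///   }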
+ void* (*allocate)(void* context, size_t size); + /// Pointer to a function to be called for general memory re-allocation, i.e. to increase or shrink a previously + /// allocated memory block. The content of the old memory block is copied to the new memory block. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL. + /// If the pointer is NULL, the @ref reallocate call is equivalent to an @ref allocate call. + /// @param size - The new size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the newly allocated memory block of at least @ref size bytes with the content of the previous + /// memory block. + /// If allocation fails, the function must return NULL, but must not release the previous memory block. + void* (*reallocate)(void* context, void* pointer, size_t size); + /// Pointer to a function to be called for general memory de-allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref allocate or @ref reallocate functions. Can be NULL. + /// If the pointer is NULL, the @ref deallocate call is a no-op. + void (*deallocate)(void* context, void* pointer); + /// Pointer to a function to be called for aligned memory allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param alignment - The alignment of the memory block to allocate, in bytes. Alignment is always a power-of-2. + /// @param size - The size of the memory block to allocate, in bytes. + /// + /// @returns Pointer to the allocated memory block of at least @ref size bytes. + /// If allocation fails, the function must return NULL. + void* (*aligned_allocate)(void* context, size_t alignment, size_t size); + /// Pointer to a function to be called for aligned memory de-allocation. + /// + /// @param context - The user-specified pointer from xnn_allocator structure. + /// @param pointer - Pointer to a memory block allocated by @ref aligned_allocate function. Can be NULL. + /// If the pointer is NULL, the @ref aligned_deallocate call is a no-op. + void (*aligned_deallocate)(void* context, void* pointer); +}; + +/// Initialize XNNPACK library. +/// +/// XNNPACK must be successfully initialized before use. During initialization, XNNPACK populates internal structures +/// depending on the host processor. Initialization can be time-consuming. +/// +/// @param[in] allocator - structure with function pointers to be use for memory allocation and de-allocation. +/// If this argument is NULL, system-provided memory management functions (e.g. malloc/free) +/// will be used. +/// +/// @retval xnn_status_success - XNNPACK is successfully initialized and ready to use. +/// @retval xnn_status_out_of_memory - initialization failed due to out-of-memory condition. +/// @retval xnn_status_unsupported_hardware - initialization failed because the host processor does not satisfy the +/// minimum hardware requirements for XNNPACK. E.g. this may happen on x86 +/// processors without SSE2 extension, or on 32-bit ARM processors without +/// the NEON SIMD extension. +enum xnn_status xnn_initialize(const struct xnn_allocator* allocator); + +/// Deinitialize XNNPACK library. +/// +/// To avoid memory and resource leaks, users must call xnn_deinitialize once for each successful xnn_initialize call. 
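+///
+/// A typical pairing looks like this (a sketch; error handling is omitted):
+///
+///   if (xnn_initialize(/*allocator=*/NULL) == xnn_status_success) {
+///     /* ... define Subgraphs, create Runtimes, run inference ... */
+///     xnn_deinitialize();
+///   }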
+/// +/// @retval xnn_status_success - deinitialization call succeeded. +enum xnn_status xnn_deinitialize(void); + +/// Subgraph is an abstract representation of a neural network model. +/// Subgraph objects are used to define Values (tensors) and Nodes (operators) comprising the model. +typedef struct xnn_subgraph* xnn_subgraph_t; + +/// Create a empty Subgraph object. +/// +/// @param external_value_ids - number of Value IDs to reserve for communication with external graph representation. +/// The Subgraph object would avoid creating internal Value IDs in the +/// [0, reserved_value_ids-1] range. +/// @param flags - binary features of the subgraph. No supported flags are currently defined. +/// @param subgraph_out - pointer to the variable that will be initialized with a handle to the Subgraph object upon +/// successful return. +enum xnn_status xnn_create_subgraph( + uint32_t external_value_ids, + uint32_t flags, + xnn_subgraph_t* subgraph_out); + +/// Destroy a Subgraph object, as well as Values, and Nodes associated with the subgraph. +/// +/// @param subgraph - the Subgraph object to destroy. +enum xnn_status xnn_delete_subgraph( + xnn_subgraph_t subgraph); + +#define XNN_VALUE_FLAG_EXTERNAL_INPUT 0x00000001 +#define XNN_VALUE_FLAG_EXTERNAL_OUTPUT 0x00000002 +#define XNN_VALUE_FLAG_PERSISTENT 0x00000004 + +#define XNN_INVALID_VALUE_ID UINT32_MAX + +/// Type of elements in a Value object. +enum xnn_datatype { + /// Invalid data type. Valid Values never have this datatype. + xnn_datatype_invalid = 0, + /// IEEE754 single-precision floating-point. + xnn_datatype_fp32 = 1, + /// IEEE754 half-precision floating-point. + xnn_datatype_fp16 = 2, + /// Quantized 8-bit signed integer with shared per-Value quantization parameters. + xnn_datatype_qint8 = 3, + /// Quantized 8-bit unsigned integer with shared per-Value quantization parameters. + xnn_datatype_quint8 = 4, + /// Quantized 32-bit signed integer with shared per-Value quantization parameters. + xnn_datatype_qint32 = 5, + /// Quantized 8-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint8 = 6, + /// Quantized 32-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint32 = 7, + /// Quantized 4-bit signed integer with shared per-channel quantization parameters. + xnn_datatype_qcint4 = 8, + /// Dynamically quantized 8-bit signed integer with per-batch quantization parameters. + xnn_datatype_qdint8 = 9, +}; + +/// Define a tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be is NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. 
Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + size_t num_dims, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - multiplication factor to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized, +/// this pointer must be is NULL. If non-NULL, the life-time of the static data must exceed the life-time +/// of the Subgraph object, and of any Runtime objects created from the Subgraph. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT +/// and XNN_VALUE_FLAG_EXTERNAL_OUTPUT. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + int32_t zero_point, + float scale, + size_t num_dims, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +enum xnn_status xnn_define_channelwise_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + const float* scale, + size_t num_dims, + size_t channel_dim, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Validate the dimensions, channel_dim, zero point, datatype, and scale of a quantized tensor-type. +/// +/// @param datatype - type of the tensor elements. +/// @param zero_point - offset from zero to subtract from the quantized elements in the Value. +/// @param scale - multiplication factor to convert quantized elements to real representation. +/// @param num_dims - number of dimensions in the shape. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. 
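+///
+/// For example, validating the parameters of a 2D quantized 8-bit signed tensor
+/// (a sketch; the values are only illustrative):
+///
+///   const size_t dims[2] = {128, 256};
+///   enum xnn_status status = xnn_validate_quantized_tensor(
+///     xnn_datatype_qint8, /*zero_point=*/0, /*scale=*/0.5f, /*num_dims=*/2, dims);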
+enum xnn_status xnn_validate_quantized_tensor(
+  enum xnn_datatype datatype,
+  int32_t zero_point,
+  float scale,
+  size_t num_dims,
+  const size_t* dims);
+
+/// Validate the dimensions, channel_dim, zero point, datatype, and scales of a channelwise quantized tensor-type.
+///
+/// @param datatype - type of the tensor elements.
+/// @param zero_point - offset from zero to subtract from the quantized elements in the Value.
+/// @param scale - per-channel multiplication factors to convert quantized elements to real representation.
+/// @param num_dims - number of dimensions in the shape.
+/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters.
+///                      Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution,
+///                      Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in
+///                      the Depthwise Convolution operators.
+/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
+///               XNNPACK does not keep any pointers to this array after the function returns.
+enum xnn_status xnn_validate_channelwise_quantized_tensor(
+  enum xnn_datatype datatype,
+  int32_t zero_point,
+  const float* scale,
+  size_t num_dims,
+  size_t channel_dim,
+  const size_t* dims);
+
+/// Define a channelwise quantized tensor-type Value and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Value.
+/// @param datatype - type of the tensor elements.
+/// @param zero_point - offset from zero to subtract from the quantized elements in the Value.
+/// @param scale - per-channel multiplication factors to convert quantized elements to real representation.
+/// @param num_dims - number of dimensions in the shape.
+/// @param channel_dim - index of the channel dimension in the tensor with per-channel quantization parameters.
+///                      Typically this is the first dimension (dimension #0) of the filter tensors in the Convolution,
+///                      Deconvolution, and Fully Connected operators and the last dimension of the filter tensors in
+///                      the Depthwise Convolution operators.
+/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
+///               XNNPACK does not keep any pointers to this array after the function returns.
+/// @param data - pointer to static data used for tensor initialization. If the tensor is not statically initialized,
+///               this pointer must be NULL. If non-NULL, the life-time of the static data must exceed the life-time
+///               of the Subgraph object, and of any Runtime objects created from the Subgraph.
+/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified on
+///                      the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be
+///                      created for the Value.
+/// @param flags - binary features of the Value. Supported values are any combination of XNN_VALUE_FLAG_EXTERNAL_INPUT
+///                and XNN_VALUE_FLAG_EXTERNAL_OUTPUT.
+/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a
+///                 valid @a external_id was provided, the variable will be initialized with the @a external_id value.
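+///
+/// For example, defining a static per-channel-quantized convolution filter
+/// (a sketch; `subgraph`, `filter_scales`, and `filter_data` are assumed to be
+/// valid and to outlive the Subgraph and any Runtimes created from it):
+///
+///   const size_t filter_dims[4] = {32, 3, 3, 16};
+///   uint32_t filter_id = XNN_INVALID_VALUE_ID;
+///   enum xnn_status status = xnn_define_channelwise_quantized_tensor_value_v2(
+///     subgraph, xnn_datatype_qcint8, /*zero_point=*/0, filter_scales,
+///     /*num_dims=*/4, /*channel_dim=*/0, filter_dims, filter_data,
+///     XNN_INVALID_VALUE_ID, /*flags=*/0, &filter_id);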
+enum xnn_status xnn_define_channelwise_quantized_tensor_value_v2( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + int32_t zero_point, + const float* scale, + size_t num_dims, + size_t channel_dim, + const size_t* dims, + const void* data, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a dynamically quantized tensor-type Value and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Value. +/// @param datatype - type of the tensor elements. +/// @param num_dims - number of dimensions in the shape. +/// @param num_non_batch_dims - number of non-batch dimensions in the shape. The leading (num_dims - num_non_batch_dims) +/// dimensions will be flattened and treated as batch size. A set of quantization parameters +/// will be calculated for each batch element. +/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL. +/// XNNPACK does not keep any pointers to this array after the function returns. +/// @param external_id - external ID for the Value. The ID must be within the range of reversed Value IDs specified on +/// the Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be +/// created for the Value. +/// @param flags - binary features of the Value. No supported flags are currently defined. +/// @param id_out - pointer to the variable that will be initialized with the Value ID upon successful return. If a +/// valid @a external_id was provided, the variable will be initialized with the @a external_id value. +enum xnn_status xnn_define_dynamically_quantized_tensor_value( + xnn_subgraph_t subgraph, + enum xnn_datatype datatype, + size_t num_dims, + size_t num_nonbatch_dims, + const size_t* dims, + uint32_t external_id, + uint32_t flags, + uint32_t* id_out); + +/// Define a Convert Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Convert Node. No supported flags are currently defined. +enum xnn_status xnn_define_convert( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Convolution Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride). 
+/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride).
+/// @param dilation_height - dilation of kernel elements along the height dimension.
+/// @param dilation_width - dilation of kernel elements along the width dimension.
+/// @param groups - number of convolution groups.
+/// @param group_input_channels - number of input channels per group.
+/// @param group_output_channels - number of output channels per group.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+///                   with [N, IH, IW, groups * group_input_channels] dimensions
+/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 4D tensor defined in the @a subgraph
+///                    with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels]
+///                    dimensions.
+/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. If
+///                  present, the bias tensor must be a 1D tensor defined in the @a subgraph with [groups *
+///                  group_output_channels] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+///                    with [N, OH, OW, groups * group_output_channels] dimensions.
+/// @param flags - binary features of the 2D Convolution Node. The only currently supported value is
+///                XNN_FLAG_TENSORFLOW_SAME_PADDING.
+enum xnn_status xnn_define_convolution_2d(
+  xnn_subgraph_t subgraph,
+  uint32_t input_padding_top,
+  uint32_t input_padding_right,
+  uint32_t input_padding_bottom,
+  uint32_t input_padding_left,
+  uint32_t kernel_height,
+  uint32_t kernel_width,
+  uint32_t subsampling_height,
+  uint32_t subsampling_width,
+  uint32_t dilation_height,
+  uint32_t dilation_width,
+  uint32_t groups,
+  size_t group_input_channels,
+  size_t group_output_channels,
+  float output_min,
+  float output_max,
+  uint32_t input_id,
+  uint32_t filter_id,
+  uint32_t bias_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a 2D Deconvolution (Transposed Convolution) Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param padding_top - implicit padding above 2D output data.
+/// @param padding_right - implicit padding to the right of 2D output data.
+/// @param padding_bottom - implicit padding below 2D output data.
+/// @param padding_left - implicit padding to the left of 2D output data.
+/// @param adjustment_height - additional elements in the bottom of the 2D output data.
+/// @param adjustment_width - additional elements to the right of the 2D output data.
+/// @param kernel_height - kernel (filter) height.
+/// @param kernel_width - kernel (filter) width.
+/// @param upsampling_height - height of upsampling region for deconvolution input (deconvolution height stride).
+/// @param upsampling_width - width of upsampling region for deconvolution input (deconvolution width stride).
+/// @param dilation_height - dilation of kernel elements along the height dimension.
+/// @param dilation_width - dilation of kernel elements along the width dimension.
+/// @param groups - number of convolution groups.
+/// @param group_input_channels - number of input channels per group.
+/// @param group_output_channels - number of output channels per group.
+/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, groups * group_input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. The filter tensor must ge a 4D tensor defined in the @a subgraph +/// with [groups * group_output_channels, kernel_height, kernel_width, group_input_channels] +/// dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Convolution Node without a bias. If +/// present, the bias tensor must be a 1D tensor defined in the @a subgraph with +/// [groups * group_output_channels] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, groups * group_output_channels] dimensions. +/// @param flags - binary features of the 2D Deconvolution Node. No supported flags are currently defined. +enum xnn_status xnn_define_deconvolution_2d( + xnn_subgraph_t subgraph, + uint32_t padding_top, + uint32_t padding_right, + uint32_t padding_bottom, + uint32_t padding_left, + uint32_t adjustment_height, + uint32_t adjustment_width, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t upsampling_height, + uint32_t upsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2D Depthwise Convolution Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING +/// flag is specified. +/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if +/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified. +/// @param kernel_height - kernel (filter) height. +/// @param kernel_width - kernel (filter) width. +/// @param subsampling_height - height of subsampling region for convolution output (convolution height stride). +/// @param subsampling_width - width of subsampling region for convolution output (convolution width stride). +/// @param dilation_height - dilation of kernel elements along the height dimension. +/// @param dilation_width - dilation of kernel elements along the width dimension. +/// @param depth_multiplier - ratio of output channels to input channels. +/// @param input_channels - number of input channels. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, input_channels] dimensions +/// @param filter_id - Value ID for the filter tensor. 
The filter tensor must ge a 4D tensor defined in the @a subgraph +/// with [1, kernel_height, kernel_width, input_channels * depth_multiplier] dimensions. +/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a 2D Depthwise Convolution Node without +/// a bias. If present, the bias tensor must be a 1D tensor defined in the @a subgraph with +/// [input_channels * depth_multiplier] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, OH, OW, input_channels * depth_multiplier] dimensions. +/// @param flags - binary features of the 2D Depthwise Convolution Node. The only currently supported values is +/// XNN_FLAG_TENSORFLOW_SAME_PADDING. +enum xnn_status xnn_define_depthwise_convolution_2d( + xnn_subgraph_t subgraph, + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t depth_multiplier, + size_t input_channels, + float output_min, + float output_max, + uint32_t input_id, + uint32_t filter_id, + uint32_t bias_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Depth To Space Node 2D and add it to a Subgraph. +/// +/// The Depth To Space 2D Node rearranges data from depth into blocks of spatial data (a reverse transform to +/// Space To Depth). For a given input pixel, an output square of pixels with side @a block_size is formed from values +/// in the corresponding number of its channels. The output depth is therefore @a block_size x @a block_size times +/// smaller than that of the input. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param block_size - the size of the spatial block. +/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH, IW, OC * block_size * block_size] dimensions. +/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph +/// with [N, IH * block_size, IW * block_size, OC] dimensions. +/// @param flags - binary features of the input_channels Node. No supported flags are currently defined. +enum xnn_status xnn_define_depth_to_space_2d( + xnn_subgraph_t subgraph, + uint32_t block_size, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +enum xnn_status xnn_define_depth_to_space( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t block_size, + uint32_t flags); + +/// Define a 1D Global Average Pooling Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions +/// defined in the @a subgraph. Averaging is performed across the second-innermost dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more +/// dimensions defined in the @a subgraph. +/// @param flags - binary features of the 1D Global Average Pooling Node. The only currently supported value is +/// XNN_FLAG_REDUCE_DIMS. 
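+///
+/// For example (a sketch; `subgraph`, `input_id`, and `output_id` are assumed to
+/// be defined already, and INFINITY from <math.h> is used as an effectively
+/// unbounded clipping range):
+///
+///   enum xnn_status status = xnn_define_global_average_pooling_1d(
+///     subgraph, /*output_min=*/-INFINITY, /*output_max=*/+INFINITY,
+///     input_id, output_id, /*flags=*/0);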
+enum xnn_status xnn_define_global_average_pooling_1d(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2D Global Average Pooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions
+/// defined in the @a subgraph. Averaging is performed across the second- and third-innermost
+/// dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more
+/// dimensions defined in the @a subgraph.
+/// @param flags - binary features of the 2D Global Average Pooling Node. The only currently supported value is
+/// XNN_FLAG_REDUCE_DIMS.
+enum xnn_status xnn_define_global_average_pooling_2d(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 1D Global Sum Pooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 2 or more dimensions
+/// defined in the @a subgraph. Summation is performed across the second-innermost dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 2 or more
+/// dimensions defined in the @a subgraph.
+/// @param flags - binary features of the 1D Global Sum Pooling Node. The only currently supported value is
+/// XNN_FLAG_REDUCE_DIMS.
+enum xnn_status xnn_define_global_sum_pooling_1d(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2D Global Sum Pooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with 3 or more dimensions
+/// defined in the @a subgraph. Summation is performed across the second- and third-innermost
+/// dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor with 3 or more
+/// dimensions defined in the @a subgraph.
+/// @param flags - binary features of the 2D Global Sum Pooling Node. The only currently supported value is
+/// XNN_FLAG_REDUCE_DIMS.
+enum xnn_status xnn_define_global_sum_pooling_2d(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2D Average Pooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING
+/// flag is specified.
+/// @param input_padding_right - implicit zero-padding to the right of 2D input data.
Must be 0 if
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
+/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
+/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
+/// @param pooling_height - pooling (kernel) height.
+/// @param pooling_width - pooling (kernel) width.
+/// @param stride_height - displacing of the pooling window in the vertical dimension of the input pixels corresponding
+/// to vertically adjacent output pixels.
+/// @param stride_width - displacing of the pooling window in the horizontal dimension of the input pixels corresponding
+/// to horizontally adjacent output pixels.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH, IW, channels] dimensions
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, OH, OW, channels] dimensions.
+/// @param flags - binary features of the 2D Average Pooling Node. The only currently supported value is
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING.
+enum xnn_status xnn_define_average_pooling_2d(
+ xnn_subgraph_t subgraph,
+ uint32_t input_padding_top,
+ uint32_t input_padding_right,
+ uint32_t input_padding_bottom,
+ uint32_t input_padding_left,
+ uint32_t pooling_height,
+ uint32_t pooling_width,
+ uint32_t stride_height,
+ uint32_t stride_width,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Fully Connected Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the
+/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least
+/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if
+/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions.
+/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be
+/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of
+/// [num_input_elements] dimensions, then reshaped into a 2D tensor of
+/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the
+/// total number of elements in the input tensor.
+/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 2D tensor defined in the @a subgraph.
+/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have
+/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is
+/// specified, the filter tensor must have [input_channels, output_channels] dimensions.
+/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias.
+/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels]
+/// dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph.
+/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same
+/// dimensionality as the input tensor, all its dimensions but the last one must match the
+/// corresponding dimensions of the input tensor, and the last dimension of the output tensor must
+/// match the first dimension of the filter tensor. In particular, if input is a 2D tensor, output
+/// must be a 2D tensor of [batch_size, output_channels] dimensions.
+/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of
+/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the
+/// total number of elements in the input tensor.
+/// @param flags - binary features of the Fully Connected Node. The only currently supported values are
+/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS.
+enum xnn_status xnn_define_fully_connected(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t filter_id,
+ uint32_t bias_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Sparse Fully Connected Node and add it to a Subgraph.
+///
+/// This operator is experimental, and will be removed in the future.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the
+/// @a subgraph. If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the input tensor must be at least
+/// 1D and its last dimension must match the last dimension of the filter tensor. In particular, if
+/// input is a 2D tensor, it must have [batch_size, input_channels] dimensions.
+/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, the number of elements in the input tensor must be
+/// divisible by the input_channels. The tensor will be first flattened into a 1D tensor of
+/// [num_input_elements] dimensions, then reshaped into a 2D tensor of
+/// [num_input_elements / input_channels, input_channels] dimensions where num_input_elements is the
+/// total number of elements in the input tensor.
+/// @param filter_id - Value ID for the filter tensor. The filter tensor must be a 2D tensor defined in the @a subgraph.
+/// If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is not specified, the filter tensor must have
+/// [output_channels, input_channels] dimensions. If the XNN_FLAG_TRANSPOSE_WEIGHTS flag is
+/// specified, the filter tensor must have [input_channels, output_channels] dimensions.
+/// @param bias_id - Value ID for the bias tensor, or XNN_INVALID_VALUE_ID for a Fully Connected Node without a bias.
+/// If present, the bias tensor must be a 1D tensor defined in the @a subgraph with [output_channels]
+/// dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph.
+/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is not specified, the output tensor must have the same
+/// dimensionality as the input tensor, all its dimensions but the last one must match the
+/// corresponding dimensions of the input tensor, and the last dimension of the output tensor must
+/// match the first dimension of the filter tensor.
In particular, if input is a 2D tensor, output
+/// must be a 2D tensor of [batch_size, output_channels] dimensions.
+/// If XNN_FLAG_TENSORFLOW_RESHAPE_2D is specified, output must be a 2D tensor of
+/// [num_input_elements / input_channels, output_channels] dimensions where num_input_elements is the
+/// total number of elements in the input tensor.
+/// @param flags - binary features of the Fully Connected Node. The only currently supported values are
+/// XNN_FLAG_TENSORFLOW_RESHAPE_2D and XNN_FLAG_TRANSPOSE_WEIGHTS.
+enum xnn_status xnn_define_fully_connected_sparse(
+ xnn_subgraph_t subgraph,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t filter_id,
+ uint32_t bias_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2D Max Pooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_padding_top - implicit zero-padding above 2D input data. Must be 0 if XNN_FLAG_TENSORFLOW_SAME_PADDING
+/// flag is specified.
+/// @param input_padding_right - implicit zero-padding to the right of 2D input data. Must be 0 if
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
+/// @param input_padding_bottom - implicit zero-padding below 2D input data. Must be 0 if
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
+/// @param input_padding_left - implicit zero-padding to the left of 2D input data. Must be 0 if
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING flag is specified.
+/// @param pooling_height - pooling (kernel) height.
+/// @param pooling_width - pooling (kernel) width.
+/// @param stride_height - displacing of the pooling window in the vertical dimension of the input pixels corresponding
+/// to vertically adjacent output pixels.
+/// @param stride_width - displacing of the pooling window in the horizontal dimension of the input pixels corresponding
+/// to horizontally adjacent output pixels.
+/// @param dilation_height - dilation of pooling elements along the height dimension.
+/// @param dilation_width - dilation of pooling elements along the width dimension.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH, IW, channels] dimensions
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, OH, OW, channels] dimensions.
+/// @param flags - binary features of the 2D Max Pooling Node. The only currently supported value is
+/// XNN_FLAG_TENSORFLOW_SAME_PADDING.
+enum xnn_status xnn_define_max_pooling_2d(
+ xnn_subgraph_t subgraph,
+ uint32_t input_padding_top,
+ uint32_t input_padding_right,
+ uint32_t input_padding_bottom,
+ uint32_t input_padding_left,
+ uint32_t pooling_height,
+ uint32_t pooling_width,
+ uint32_t stride_height,
+ uint32_t stride_width,
+ uint32_t dilation_height,
+ uint32_t dilation_width,
+ float output_min,
+ float output_max,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2D ArgMax Pooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_padding_top - implicit zero-padding above 2D input data.
+/// @param input_padding_right - implicit zero-padding to the right of 2D input data.
+/// @param input_padding_bottom - implicit zero-padding below 2D input data.
+/// @param input_padding_left - implicit zero-padding to the left of 2D input data.
+/// @param pooling_height - pooling (kernel) height. Vertical stride between pooling regions matches this value.
+/// @param pooling_width - pooling (kernel) width. Horizontal stride between pooling regions matches this value.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH, IW, channels] dimensions
+/// @param output_value_id - Value ID for the output tensor with the maximum values in the pools. The output tensor must
+/// be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels] dimensions.
+/// @param output_index_id - Value ID for the output tensor with the indexes of the maximum values in the pools. The
+/// output tensor must be a 4D tensor defined in the @a subgraph with [N, OH, OW, channels]
+/// dimensions.
+/// @param flags - binary features of the 2D ArgMax Pooling Node. No supported flags are currently defined.
+enum xnn_status xnn_define_argmax_pooling_2d(
+ xnn_subgraph_t subgraph,
+ uint32_t input_padding_top,
+ uint32_t input_padding_right,
+ uint32_t input_padding_bottom,
+ uint32_t input_padding_left,
+ uint32_t pooling_height,
+ uint32_t pooling_width,
+ uint32_t input_id,
+ uint32_t output_value_id,
+ uint32_t output_index_id,
+ uint32_t flags);
+
+/// Define a 2D UnPooling Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param padding_top - implicit padding above 2D output data.
+/// @param padding_right - implicit padding to the right of 2D output data.
+/// @param padding_bottom - implicit padding below 2D output data.
+/// @param padding_left - implicit padding to the left of 2D output data.
+/// @param pooling_height - height of the pooling window.
+/// @param pooling_width - width of the pooling window.
+/// @param input_value_id - Value ID for the input tensor with the max-pooling values to invert. The input value tensor
+/// must be a 4D tensor defined in the @a subgraph with [N, IH, IW, channels] dimensions.
+/// @param input_index_id - Value ID for the input tensor with the indices of the per-pool maximum values produced by
+/// a 2D ArgMax Pooling Node. The input tensor must be a 4D tensor defined in the @a subgraph with
+/// [N, IH, IW, channels] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, OH, OW, channels] dimensions.
+/// @param flags - binary features of the 2D UnPooling Node. No supported flags are currently defined.
+enum xnn_status xnn_define_unpooling_2d(
+ xnn_subgraph_t subgraph,
+ uint32_t padding_top,
+ uint32_t padding_right,
+ uint32_t padding_bottom,
+ uint32_t padding_left,
+ uint32_t pooling_height,
+ uint32_t pooling_width,
+ uint32_t input_value_id,
+ uint32_t input_index_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2-Input Add Node and add it to a Subgraph.
+///
+/// The 2-Input Add Node computes elementwise addition of two tensor inputs with numpy broadcasting rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input1_id - Value ID for the first input tensor.
The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Add Node. No supported flags are currently defined. +enum xnn_status xnn_define_add2( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Multiply Node and add it to a Subgraph. +/// +/// The 2-Input Multiply Node computes elementwise multiplication of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Multiply Node. No supported flags are currently defined. +enum xnn_status xnn_define_multiply2( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +// Cap operations applied to logits (Q * K) of attention operator. +enum xnn_attention_logits_cap_type { + // No capping. + xnn_attention_logits_cap_type_none = 0, + // Cap the absolute values of logits by tanh: tanh(logits / cap) * cap + xnn_attention_logits_cap_type_tanh +}; + +// Params when the cap type is xnn_attention_logits_cap_type_tanh. +struct xnn_attention_logits_cap_tanh_params { + float cap; +}; + +/// Define a Scaled Dot-Product Attention Node and add it to a Subgraph. +/// +/// This operator is experimental. +/// +/// The Scaled Dot-Product Attention Node computes a multi-head or multi-query scaled dot attention on the query, key, +/// and value tensors. +/// +/// @param subgraph - a Subgraph object that will own the created Node. 
+/// @param cap_type - type of cap to be applied to the logits.
+/// @param cap_params - parameters for the cap. Must be a pointer to xnn_attention_logits_cap_tanh_params if cap_type
+/// is xnn_attention_logits_cap_type_tanh.
+/// @param query_id - Value ID for the query tensor. The query tensor must be a 3+-dimensional tensor defined in the
+/// @a subgraph with the dimensions as [*, H, T, C], where H/T/C are the heads/tokens/channels, and *
+/// is the 0 or more dimensions treated as batch size.
+/// @param key_id - Value ID for the key tensor. The key tensor must be a 2+-dimensional tensor defined in the
+/// @a subgraph. It can have the same number of dimensions as the query, with the dimensions as
+/// [*, H, U, C] (multi-head), or have 1 less dimension than the query, with the dimensions
+/// as [*, U, C] (multi-query, number of heads omitted implies single head), where H/U/C are the
+/// heads/key_value_tokens/channels, and * is the 0 or more dimensions treated as batch size. These
+/// batch size dimensions must be the same as query.
+/// @param value_id - Value ID for the value tensor. The value tensor must be a 2+-dimensional tensor defined in the
+/// @a subgraph. It can have the same number of dimensions as the query, with the dimensions as
+/// [*, H, U, D] (multi-head), or have 1 less dimension than the query, with the dimensions
+/// as [*, U, D] (multi-query, number of heads omitted implies single head), where H/U/D are the
+/// heads/key_value_tokens/value_channels, and * is the 0 or more dimensions treated as batch size.
+/// These batch size dimensions must be the same as query and key.
+/// @param scale_id - Value ID for the scale tensor. The scale tensor must be a 1D tensor defined in the @a subgraph
+/// with [C] dimensions. The query tensor is multiplied with this scale tensor before the dot product
+/// with the key tensor.
+/// @param mask_id - Value ID for the mask tensor. The mask tensor must be a 2D tensor defined in the @a subgraph with
+/// [T, U] dimensions. The mask tensor is added to the logits (query dot key).
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 3+-dimensional tensor defined in the
+/// @a subgraph with the dimensions as [*, H, T, D], where H/T/D are the heads/tokens/value_channels,
+/// and * is the 0 or more dimensions treated as batch size. These batch size dimensions must be the
+/// same as query, key, and value.
+/// @param flags - binary features of the Scaled Dot Product Attention Node. No supported flags are currently defined.
+enum xnn_status xnn_define_scaled_dot_product_attention(
+ xnn_subgraph_t subgraph,
+ enum xnn_attention_logits_cap_type cap_type,
+ const void* cap_params,
+ uint32_t query_id,
+ uint32_t key_id,
+ uint32_t value_id,
+ uint32_t scale_id,
+ uint32_t mask_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Subtract Node and add it to a Subgraph.
+///
+/// The Subtract Node computes elementwise subtraction of two tensor inputs with numpy broadcasting rules.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param output_min - lower bound for clipping output values.
+/// @param output_max - upper bound for clipping output values.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension either equal to the corresponding dimension of the second
+/// input, or equal to 1.
In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Subtract Node. No supported flags are currently defined. +enum xnn_status xnn_define_subtract( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Divide Node and add it to a Subgraph. +/// +/// The Divide Node computes elementwise division of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Divide Node. No supported flags are currently defined. +enum xnn_status xnn_define_divide( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Maximum Node and add it to a Subgraph. +/// +/// The 2-Input Maximum Node computes elementwise maximum of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. 
+/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Maximum Node. No supported flags are currently defined. +enum xnn_status xnn_define_maximum2( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Input Minimum Node and add it to a Subgraph. +/// +/// The 2-Input Minimum Node computes elementwise minimum of two tensor inputs with numpy broadcasting rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Minimum Node. No supported flags are currently defined. +enum xnn_status xnn_define_minimum2( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Squared Difference Node and add it to a Subgraph. +/// +/// The Squared Difference Node computes elementwise squared difference of two tensor inputs with numpy broadcasting +/// rules. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the second +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param input2_id - Value ID for the second input tensor. The input tensor must be an M-dimensional tensor defined in +/// the @a subgraph with each dimension either equal to the corresponding dimension of the first +/// input, or equal to 1. In the latter case, the elements of the input tensor are broadcasted along +/// that dimension. +/// @param output_id - Value ID for the output tensor. The output tensor must be a max(N,M)-dimensional tensor defined +/// in the @a subgraph with each dimension equal to the maximum between the corresponding dimension +/// of the two inputs. +/// @param flags - binary features of the Squared Difference Node. No supported flags are currently defined. +enum xnn_status xnn_define_squared_difference( + xnn_subgraph_t subgraph, + uint32_t input1_id, + uint32_t input2_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Constant Pad Node with static padding specification and add it to a Subgraph. 
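+///
+/// Illustrative usage sketch (hedged, not normative): pads the spatial dimensions of a hypothetical
+/// [N, H, W, C] input by one zero element on each side. input_id and output_id are assumed to have
+/// been created earlier, e.g. with xnn_define_tensor_value.
+///
+///   const size_t pre_paddings[4]  = {0, 1, 1, 0};  // no padding on N and C, one element before H and W
+///   const size_t post_paddings[4] = {0, 1, 1, 0};  // one element after H and W
+///   enum xnn_status status = xnn_define_static_constant_pad(
+///       subgraph, pre_paddings, post_paddings, /*padding_value=*/0.0f,
+///       input_id, output_id, /*flags=*/0);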
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param pre_paddings - number of padding elements to insert before input elements for every dimension. This array
+/// must have as many elements as the number of dimensions in the input tensor.
+/// @param post_paddings - number of padding elements to insert after input elements for every dimension. This array
+/// must have as many elements as the number of dimensions in the input tensor.
+/// @param padding_value - constant value used to initialize padding elements.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor with padding.
+/// @param flags - binary features of the Constant Pad Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_constant_pad(
+ xnn_subgraph_t subgraph,
+ const size_t* pre_paddings,
+ const size_t* post_paddings,
+ float padding_value,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Mean Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param num_reduction_axes - number of axes along which mean is computed.
+/// @param reduction_axes - axes along which mean is computed.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a dense tensor with at least
+/// @a num_reduction_axes dimensions defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a dense tensor defined in the
+/// @a subgraph with @a num_reduction_axes fewer dimensions than the input tensor (if
+/// XNN_FLAG_REDUCE_DIMS is specified), or with the same rank but with the dimensions at
+/// @a reduction_axes reduced to 1 (if XNN_FLAG_REDUCE_DIMS is not specified).
+/// @param flags - binary features of the Mean Node. The only currently supported value is XNN_FLAG_REDUCE_DIMS.
+enum xnn_status xnn_define_static_mean(
+ xnn_subgraph_t subgraph,
+ size_t num_reduction_axes,
+ const size_t* reduction_axes,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2-Input Concatenate Node and add it to a Subgraph.
+///
+/// The 2-Input Concatenate Node concatenates two tensors along a specified axis.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param axis - the axis to concatenate the two input tensors along.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// second input.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// first input.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the dimension of both inputs, except the axis
+/// dimension, where it is the sum of the corresponding dimensions of both inputs.
+/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
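+///
+/// Illustrative usage sketch (hedged): concatenates two NHWC tensors along the channel axis (axis 3).
+/// The value IDs are assumed to have been created earlier, e.g. with xnn_define_tensor_value.
+///
+///   enum xnn_status status = xnn_define_concatenate2(
+///       subgraph, /*axis=*/3, input1_id, input2_id, output_id, /*flags=*/0);
+///   if (status != xnn_status_success) {
+///     // handle the error
+///   }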
+enum xnn_status xnn_define_concatenate2(
+ xnn_subgraph_t subgraph,
+ size_t axis,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 3-Input Concatenate Node and add it to a Subgraph.
+///
+/// The 3-Input Concatenate Node concatenates three tensors along a specified axis.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param axis - the axis to concatenate the three input tensors along.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis
+/// dimension, where it is the sum of the corresponding dimensions of all inputs.
+/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
+enum xnn_status xnn_define_concatenate3(
+ xnn_subgraph_t subgraph,
+ size_t axis,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t input3_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 4-Input Concatenate Node and add it to a Subgraph.
+///
+/// The 4-Input Concatenate Node concatenates four tensors along a specified axis.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param axis - the axis to concatenate the four input tensors along.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param input3_id - Value ID for the third input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param input4_id - Value ID for the fourth input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph with each dimension, except the axis, equal to the corresponding dimension of the
+/// other inputs.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension equal to the dimension of all inputs, except the axis
+/// dimension, where it is the sum of the corresponding dimensions of all inputs.
+/// @param flags - binary features of the Concatenate Node. No supported flags are currently defined.
+enum xnn_status xnn_define_concatenate4( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t input4_id, + uint32_t output_id, + uint32_t flags); + +enum xnn_status xnn_define_concatenate5( + xnn_subgraph_t subgraph, + size_t axis, + uint32_t input1_id, + uint32_t input2_id, + uint32_t input3_id, + uint32_t input4_id, + uint32_t input5_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Copy Node and add it to a Subgraph. +/// +/// The Copy Node copies an input tensor to an output tensor. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the first input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Copy Node. No supported flags are currently defined. +enum xnn_status xnn_define_copy( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a 2-Output Split Node and add it to a Subgraph. +/// +/// The 2-Output Split Node splits an input tensor into two output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the second output. The split_dim dimension is half of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the first output. The split_dim dimension is half of the input's split_dim. +/// @param flags - binary features of the Split Node. No supported flags are currently defined. +enum xnn_status xnn_define_even_split2( + xnn_subgraph_t subgraph, + size_t split_dim, + uint32_t input_id, + uint32_t output1_id, + uint32_t output2_id, + uint32_t flags); + +/// Define a 3-Output Split Node and add it to a Subgraph. +/// +/// The 3-Output Split Node splits an input tensor into three output tensors along a specified axis evenly. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param split_dim - the dimension to split the input tensor along +/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a +/// subgraph. +/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined +/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension +/// of the second and third output. The split_dim dimension is one third of the input's split_dim. +/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor +/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding +/// dimension of the first and third output. 
The split_dim dimension is one third of the input's
+/// split_dim.
+/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the first and second output. The split_dim dimension is one third of the input's
+/// split_dim.
+/// @param flags - binary features of the Split Node. No supported flags are currently defined.
+enum xnn_status xnn_define_even_split3(
+ xnn_subgraph_t subgraph,
+ size_t split_dim,
+ uint32_t input_id,
+ uint32_t output1_id,
+ uint32_t output2_id,
+ uint32_t output3_id,
+ uint32_t flags);
+
+/// Define a 4-Output Split Node and add it to a Subgraph.
+///
+/// The 4-Output Split Node splits an input tensor into four output tensors along a specified axis evenly.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param split_dim - the dimension to split the input tensor along
+/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in the @a
+/// subgraph.
+/// @param output1_id - Value ID for the first output tensor. The output tensor must be an N-dimensional tensor defined
+/// in the @a subgraph with each dimension, except the axis, equal to the corresponding dimension
+/// of the other output tensors. The split_dim dimension is one fourth of the input's split_dim.
+/// @param output2_id - Value ID for the second output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
+/// split_dim.
+/// @param output3_id - Value ID for the third output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
+/// split_dim.
+/// @param output4_id - Value ID for the fourth output tensor. The output tensor must be an N-dimensional tensor
+/// defined in the @a subgraph with each dimension, except the axis, equal to the corresponding
+/// dimension of the other output tensors. The split_dim dimension is one fourth of the input's
+/// split_dim.
+/// @param flags - binary features of the Split Node. No supported flags are currently defined.
+enum xnn_status xnn_define_even_split4(
+ xnn_subgraph_t subgraph,
+ size_t split_dim,
+ uint32_t input_id,
+ uint32_t output1_id,
+ uint32_t output2_id,
+ uint32_t output3_id,
+ uint32_t output4_id,
+ uint32_t flags);
+
+/// Define a Reshape Node with static shape specification and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param num_dims - number of shape dimensions in the output tensor.
+/// @param new_shape - shape dimensions of the output tensor.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must contain the same number of elements as the input tensor.
+/// @param flags - binary features of the Reshape Node. No supported flags are currently defined.
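+///
+/// Illustrative usage sketch (hedged): flattens a hypothetical [N, H, W, C] tensor into a 2D
+/// [N, H * W * C] tensor. The shape values are placeholders; input_id and output_id are assumed to
+/// have been created earlier, e.g. with xnn_define_tensor_value.
+///
+///   const size_t new_shape[2] = {batch_size, height * width * channels};
+///   enum xnn_status status = xnn_define_static_reshape(
+///       subgraph, /*num_dims=*/2, new_shape, input_id, output_id, /*flags=*/0);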
+enum xnn_status xnn_define_static_reshape(
+ xnn_subgraph_t subgraph,
+ size_t num_dims,
+ const size_t* new_shape,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Node that reshapes a tensor to two dimensions, retaining the
+/// trailing dimension, and add it to a Subgraph.
+///
+/// This operator is experimental.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be
+/// defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be
+/// defined in the @a subgraph, and its
+/// size must match the size of the input tensor.
+/// @param flags - binary features of the Reshape Node. No supported flags are
+/// currently defined.
+enum xnn_status xnn_define_reshape_2d(xnn_subgraph_t subgraph,
+ uint32_t input_id, uint32_t output_id,
+ uint32_t flags);
+
+/// Define a 2D Resize Bilinear Node with static output height & width specification and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param new_height - height dimension of the output tensor.
+/// @param new_width - width dimension of the output tensor.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, H, W, C] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, new_height, new_width, C] dimensions.
+/// @param flags - binary features of the 2D Resize Bilinear Node. The only currently supported values are
+/// XNN_FLAG_TENSORFLOW_LEGACY_MODE and XNN_FLAG_ALIGN_CORNERS, which are mutually exclusive.
+enum xnn_status xnn_define_static_resize_bilinear_2d(
+ xnn_subgraph_t subgraph,
+ size_t new_height,
+ size_t new_width,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a PReLU (Parametric ReLU) Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, H, W, channels] dimensions.
+/// @param slope_id - Value ID for the slope tensor. The slope tensor must be a 1D tensor defined in the @a subgraph with
+/// [channels] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, H, W, channels] dimensions.
+/// @param flags - binary features of the PReLU Node. No supported flags are currently defined.
+enum xnn_status xnn_define_prelu(
+ xnn_subgraph_t subgraph,
+ uint32_t input_id,
+ uint32_t slope_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a RoPE (Rotary Positional Embeddings) Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param max_sequence_size - maximum possible number of tokens (maximum sequence length) of the input/output tensors.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [batch, tokens, heads, channels] dimensions.
+/// @param weights_id - Value ID for the weights tensor. The weights tensor must be a 2D tensor defined in the
+/// @a subgraph with [max_sequence_size, channels] dimensions.
+/// @param output_id - Value ID for the output tensor.
The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [batch, tokens, heads, channels] dimensions.
+/// @param flags - binary features of the RoPE Node. No supported flags are currently defined.
+enum xnn_status xnn_define_rope(
+ xnn_subgraph_t subgraph,
+ size_t max_sequence_size,
+ uint32_t input_id,
+ uint32_t weights_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define an Abs Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Abs Node. No supported flags are currently defined.
+enum xnn_status xnn_define_abs(
+ xnn_subgraph_t subgraph,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Bankers' Rounding Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Bankers' Rounding Node. No supported flags are currently defined.
+enum xnn_status xnn_define_bankers_rounding(
+ xnn_subgraph_t subgraph,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Batch Matrix Multiply Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input1_id - Value ID for the first input tensor. The input tensor must be an N-dimensional tensor defined in
+/// the @a subgraph. It must be at least 3D. The first N-2 dimensions must match the second input
+/// tensor. The last 2 dimensions are [M, K]. If XNN_FLAG_TRANSPOSE_B is not specified, the last
+/// dimension must match the second last dimension of the second input tensor. If
+/// XNN_FLAG_TRANSPOSE_B is specified, the last dimension must match the last dimension of the
+/// second input tensor.
+/// @param input2_id - Value ID for the second input tensor. The input tensor must be an N-dimensional tensor defined
+/// in the @a subgraph. It must be at least 3D. The first N-2 dimensions must match the first input
+/// tensor. If XNN_FLAG_TRANSPOSE_B is not specified, the last 2 dimensions are [K, N], and the
+/// second last dimension must match the last dimension of the first input tensor. If
+/// XNN_FLAG_TRANSPOSE_B is specified, the last 2 dimensions are [N, K], and the last dimension must
+/// match the last dimension of the first input tensor.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined in the
+/// @a subgraph. It must be at least 3D. The first N-2 dimensions must match the first and second
+/// input tensors. The last 2 dimensions must be [M, N].
+/// @param flags - binary features of the Batch Matrix Multiply Node. The only currently supported value is
+/// XNN_FLAG_TRANSPOSE_B.
+enum xnn_status xnn_define_batch_matrix_multiply(
+ xnn_subgraph_t subgraph,
+ uint32_t input1_id,
+ uint32_t input2_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Ceiling Node and add it to a Subgraph.
+/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Ceiling Node. No supported flags are currently defined. +enum xnn_status xnn_define_ceiling( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Clamp Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param output_min - lower bound for clipping output values. +/// @param output_max - upper bound for clipping output values. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Clamp Node. No supported flags are currently defined. +enum xnn_status xnn_define_clamp( + xnn_subgraph_t subgraph, + float output_min, + float output_max, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define an ELU (Exponential Linear Unit) Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param alpha - scale factor for negative output elements. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the ELU Node. No supported flags are currently defined. +enum xnn_status xnn_define_elu( + xnn_subgraph_t subgraph, + float alpha, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Floor Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the Floor Node. No supported flags are currently defined. +enum xnn_status xnn_define_floor( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a HardSwish Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. +/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph. +/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its +/// shape must match the shape of the input tensor. +/// @param flags - binary features of the HardSwish Node. No supported flags are currently defined. +enum xnn_status xnn_define_hardswish( + xnn_subgraph_t subgraph, + uint32_t input_id, + uint32_t output_id, + uint32_t flags); + +/// Define a Leaky ReLU Node and add it to a Subgraph. +/// +/// @param subgraph - a Subgraph object that will own the created Node. 
+/// @param negative_slope - scale factor for negative input elements.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Leaky ReLU Node. No supported flags are currently defined.
+enum xnn_status xnn_define_leaky_relu(
+ xnn_subgraph_t subgraph,
+ float negative_slope,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Negate Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Negate Node. No supported flags are currently defined.
+enum xnn_status xnn_define_negate(
+ xnn_subgraph_t subgraph,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Sigmoid Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the Sigmoid Node. No supported flags are currently defined.
+enum xnn_status xnn_define_sigmoid(
+ xnn_subgraph_t subgraph,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a SoftMax Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph, and have at
+/// least one dimension.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+/// shape must match the shape of the input tensor.
+/// @param flags - binary features of the SoftMax Node. No supported flags are currently defined.
+enum xnn_status xnn_define_softmax(
+ xnn_subgraph_t subgraph,
+ uint32_t input_id,
+ uint32_t output_id,
+ uint32_t flags);
+
+/// Define a Space To Depth 2D Node and add it to a Subgraph.
+///
+/// The Space To Depth 2D Node rearranges blocks of spatial data into depth (a reverse transform to Depth To Space 2D).
+/// For a given output pixel, the values of an input square of pixels with side @a block_size are gathered into the
+/// corresponding number of its channels. The output depth is therefore @a block_size x @a block_size times greater
+/// than that of the input.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param block_size - the size of the spatial block.
+/// @param input_id - Value ID for the input tensor. The input tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH * block_size, IW * block_size, OC] dimensions.
+/// @param output_id - Value ID for the output tensor. The output tensor must be a 4D tensor defined in the @a subgraph
+/// with [N, IH, IW, OC * block_size * block_size] dimensions.
+/// @param flags - binary features of the Space To Depth 2D Node. No supported flags are currently defined.
+enum xnn_status xnn_define_space_to_depth_2d(
+  xnn_subgraph_t subgraph,
+  uint32_t block_size,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Square Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+///                    shape must match the shape of the input tensor.
+/// @param flags - binary features of the Square Node. No supported flags are currently defined.
+enum xnn_status xnn_define_square(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Square Root Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+///                    shape must match the shape of the input tensor.
+/// @param flags - binary features of the Square Root Node. No supported flags are currently defined.
+enum xnn_status xnn_define_square_root(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Reciprocal Square Root Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+///                    shape must match the shape of the input tensor.
+/// @param flags - binary features of the Reciprocal Square Root Node. No supported flags are currently defined.
+enum xnn_status xnn_define_reciprocal_square_root(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Static Slice Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param num_dims - number of shape dimensions in the input and output tensors.
+/// @param offsets - offsets in each dimension of the input tensor. This array must have @a num_dims elements.
+/// @param sizes - size of each dimension in the output tensor. This array must have @a num_dims elements.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+///                    dimensions must match @a sizes.
+/// @param flags - binary features of the Static Slice Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_slice(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* offsets,
+  const size_t* sizes,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Static Transpose Node and add it to a Subgraph.
+///
+/// The Static Transpose Node applies a generalized transpose to the input tensor using the permutation in @a perm.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be an N-dimensional tensor defined in
+///                   the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be an N-dimensional tensor defined
+///                    in the @a subgraph with each dimension equal to its corresponding permuted input dimension.
+/// @param num_dims - the number of permutation dimensions. This must be equal to the number of input dimensions.
+/// @param perm - The permutation of the axes of the input tensor. The perm array must contain 0 to N-1 in the
+///               permuted order.
+/// @param flags - binary features of the Static Transpose Node. No supported flags are currently defined.
+enum xnn_status xnn_define_static_transpose(
+  xnn_subgraph_t subgraph,
+  size_t num_dims,
+  const size_t* perm,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Define a Tanh Node and add it to a Subgraph.
+///
+/// @param subgraph - a Subgraph object that will own the created Node.
+/// @param input_id - Value ID for the input tensor. The input tensor must be defined in the @a subgraph.
+/// @param output_id - Value ID for the output tensor. The output tensor must be defined in the @a subgraph, and its
+///                    shape must match the shape of the input tensor.
+/// @param flags - binary features of the Tanh Node. No supported flags are currently defined.
+enum xnn_status xnn_define_tanh(
+  xnn_subgraph_t subgraph,
+  uint32_t input_id,
+  uint32_t output_id,
+  uint32_t flags);
+
+/// Code cache is a cache for JIT generated code.
+typedef struct xnn_code_cache* xnn_code_cache_t;
+
+/// Weights cache can be finalized in these ways:
+enum xnn_weights_cache_finalization_kind {
+  /// Weights cache is finalized; no insert operations into the weights cache are allowed, even if the "inserted"
+  /// weights already exist in the cache. Weights cache memory will also be trimmed to a page boundary and set to
+  /// read-only (to prevent writes).
+  xnn_weights_cache_finalization_kind_hard,
+  /// Weights cache will be finalized with some extra space at the end. This allows "inserting" into the cache only
+  /// if the weights are already in the cache, and errors on inserting uncached weights. There is memory overhead.
+  xnn_weights_cache_finalization_kind_soft,
+};
+
+/// A combination of multiple factors used to uniquely locate a weights cache entry.
+struct xnn_weights_cache_look_up_key {
+  /// The unique seed for each ukernel. It is guaranteed that each ukernel provides
+  /// a consistent and identical seed.
+  uint32_t seed;
+  /// Pointer to the original kernel.
+  const void* kernel;
+  /// Pointer to the original bias; may be NULL.
+  const void* bias;
+};
+
+/// A group of function pointers to manage a weights cache. All functions may be
+/// called from multiple threads.
+struct xnn_weights_cache_provider {
+  /// User-specified pointer that will be passed as-is to all functions in this
+  /// structure.
+  void* context;
+
+  /// Looks up the tuple of {cache_key, kernel, bias} in the cache. If it is found,
+  /// returns the offset to the found entry for reuse. Otherwise, returns SIZE_MAX.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param cache_key - The key used to locate the weights cache entry.
+  size_t (*look_up)(void* context, const struct xnn_weights_cache_look_up_key* cache_key);
+
+  /// Ensures that the cache has enough space for `n` bytes. Returns the address at
+  /// which to store the packed weights, or NULL if it fails to reserve space.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param n - size to be reserved.
+  void* (*reserve_space)(void* context, size_t n);
+
+  /// Looks up the packed weights at `ptr` in the cache. If they are found, they are reused; otherwise, they are added
+  /// to the cache. Returns the offset of the weights in the cache.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param cache_key - The key used to locate the weights cache entry.
+  /// @param ptr - pointer to the packed weights.
+  /// @param size - size of the packed weights.
+  size_t (*look_up_or_insert)(void* context, const struct xnn_weights_cache_look_up_key* cache_key, void* ptr, size_t size);
+
+  /// Returns whether the cache is finalized.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  bool (*is_finalized)(void* context);
+
+  /// Returns the absolute pointer corresponding to `offset`, where the offset is returned from
+  /// `look_up` or `look_up_or_insert`. This function must be called after the cache is finalized.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  /// @param offset - offset from the start of the internal buffer.
+  void* (*offset_to_addr)(void* context, size_t offset);
+
+  /// Destroy a weights cache object, as well as memory used for the cache.
+  /// @param context - The user-specified pointer from xnn_weights_cache_provider structure.
+  enum xnn_status (*delete_cache)(void* context);
+};
+
+/// Weights cache is a cache for packed weights. It can be reused between runtimes.
+typedef struct xnn_weights_cache_provider* xnn_weights_cache_t;
+
+/// Create a weights cache object specifying the initial size of the weights cache (in bytes).
+///
+/// @param[in] size - initial capacity of the weights cache (in bytes), i.e. it can hold size bytes without growing.
+/// @param weights_cache_out - pointer to the variable that will be initialized to a handle to the weights cache
+///                            provider upon successful return. Once created, the weights cache provider can be shared
+///                            between different Runtime objects.
+enum xnn_status xnn_create_weights_cache_with_size(size_t size, xnn_weights_cache_t* weights_cache_out);
+
+enum xnn_status xnn_create_weights_cache(xnn_weights_cache_t* weights_cache_out);
+
+/// Finalizes the weights cache. The kind of finalization is specified by `finalization_kind`.
+/// @param weights_cache - the weights cache object to finalize.
+/// @param finalization_kind - the kind of finalization.
+enum xnn_status xnn_finalize_weights_cache(
+  xnn_weights_cache_t weights_cache,
+  enum xnn_weights_cache_finalization_kind finalization_kind);
+
+/// Destroy a weights cache object, as well as memory used for the cache.
+/// @param weights_cache - the weights cache object to destroy.
+enum xnn_status xnn_delete_weights_cache(xnn_weights_cache_t weights_cache);
+
+typedef struct xnn_workspace* xnn_workspace_t;
+
+/// Create a workspace object.
+/// @param workspace_out - pointer to the variable that will be initialized to a handle to the workspace object upon
+///                        successful return. Once created, the workspace can be shared between different Runtime
+///                        objects.
+enum xnn_status xnn_create_workspace(xnn_workspace_t* workspace_out);
+
+/// Destroy a workspace object, as well as memory used by the workspace. Object destruction can be deferred until all
+/// Runtime objects created with this workspace are destroyed.
+/// @param workspace - the workspace object to destroy.
+enum xnn_status xnn_release_workspace(xnn_workspace_t workspace);
+
+/// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values.
+typedef struct xnn_runtime* xnn_runtime_t;
+
+enum xnn_profile_info {
+  /// Returns a size_t containing the number of operators.
+  xnn_profile_info_num_operators,
+  /// Returns a char[] containing the null-character-separated names of all operators.
+  xnn_profile_info_operator_name,
+  /// Returns a uint64_t[] with the runtimes of all operators in the same order as xnn_profile_info_operator_name.
+  xnn_profile_info_operator_timing,
+};
+
+/// Return profile information for all operators.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime, @ref xnn_create_runtime_v2 or
+///                  @ref xnn_create_runtime_v3.
+/// @param param_name - type of profile information required.
+/// @param param_value_size - the size in bytes of the memory pointed to by param_value. If this is not sufficient,
+///                           param_value_size_ret will be set to the required size and xnn_status_out_of_memory will
+///                           be returned.
+/// @param param_value - a pointer to the memory location where the values for the given @a param_name will be written.
+/// @param param_value_size_ret - returns the number of bytes required to write the result if param_value_size is not
+///                               sufficient.
+enum xnn_status xnn_get_runtime_profiling_info(xnn_runtime_t runtime,
+                                               enum xnn_profile_info param_name,
+                                               size_t param_value_size,
+                                               void* param_value,
+                                               size_t* param_value_size_ret);
+
+/// Create a Runtime object from a subgraph.
+///
+/// @param subgraph - a Subgraph object with all Values and Nodes that will be handled by the runtime. No Values or
+///                   Nodes can be added to the runtime once it is constructed.
+/// @param weights_cache - a cache for packed weights. The runtime will look up and reuse packed weights in this cache,
+///                        reducing the memory allocated for packed weights.
+/// @param workspace - a workspace to hold internal tensors. The runtime will allocate space used for internal tensors
+///                    and track them using the workspace. The workspace can be shared and reused across different
+///                    runtimes. If workspace is NULL, there will be no sharing: each runtime has its own workspace.
+/// @param threadpool - the thread pool to be used for parallelization of computations in the runtime. If the thread
+///                     pool is NULL, the computation will run on the caller thread without parallelization.
+/// @param flags - binary features of the runtime. The only currently supported values are
+///                XNN_FLAG_HINT_SPARSE_INFERENCE, XNN_FLAG_HINT_FP16_INFERENCE, XNN_FLAG_FORCE_FP16_INFERENCE,
+///                XNN_FLAG_YIELD_WORKERS, and XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER. If XNN_FLAG_YIELD_WORKERS is
+///                specified, worker threads will be yielded to the system scheduler after processing the last operator
+///                in the Runtime. If XNN_FLAG_TRANSIENT_INDIRECTION_BUFFER is specified, convolution operators will
+///                initialize indirection buffers on each inference run using temporary memory in the workspace,
+///                instead of initializing persistent indirection buffers once.
+/// @param runtime_out - pointer to the variable that will be initialized with a handle to the Runtime object upon
+///                      successful return. Once constructed, the Runtime object is independent of the Subgraph object
+///                      used to create it.
+enum xnn_status xnn_create_runtime_v4(
+  xnn_subgraph_t subgraph,
+  xnn_weights_cache_t weights_cache,
+  xnn_workspace_t workspace,
+  pthreadpool_t threadpool,
+  uint32_t flags,
+  xnn_runtime_t* runtime_out);
+
+enum xnn_status xnn_create_runtime_v3(
+  xnn_subgraph_t subgraph,
+  xnn_weights_cache_t weights_cache,
+  pthreadpool_t threadpool,
+  uint32_t flags,
+  xnn_runtime_t* runtime_out);
+
+enum xnn_status xnn_create_runtime_v2(
+  xnn_subgraph_t subgraph,
+  pthreadpool_t threadpool,
+  uint32_t flags,
+  xnn_runtime_t* runtime_out);
+
+enum xnn_status xnn_create_runtime(
+  xnn_subgraph_t subgraph,
+  xnn_runtime_t* runtime_out);
+
+struct xnn_external_value {
+  uint32_t id;
+  void* data;
+};
+
+/// Reshape an external value.
+///
+/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified at
+///                      Subgraph creation. If the external ID is XNN_INVALID_VALUE_ID, an internal ID will be created
+///                      for the Value.
+/// @param num_dims - number of dimensions in the shape.
+/// @param dims - pointer to an array of @a num_dims shape dimensions. If num_dims is 0, this pointer can be NULL.
+///               XNNPACK does not keep any pointers to this array after the function returns.
+enum xnn_status xnn_reshape_external_value(
+  xnn_runtime_t runtime,
+  uint32_t external_id,
+  size_t num_dims,
+  const size_t* dims);
+
+/// Get the external value shape.
+///
+/// @param external_id - external ID for the Value. The ID must be within the range of reserved Value IDs specified at
+///                      Subgraph creation. The external ID cannot be XNN_INVALID_VALUE_ID.
+/// @param num_dims - a valid pointer into which the number of dimensions in the shape will be written. The written
+///                   value will not be larger than XNN_MAX_TENSOR_DIMS.
+/// @param dims - pointer to an array of @a num_dims shape dimensions. This pointer can't be NULL. It must be large
+///               enough to hold at least @a num_dims elements. XNNPACK does not keep any pointers to this array after
+///               the function returns.
+enum xnn_status xnn_get_external_value_shape(
+  xnn_runtime_t runtime,
+  uint32_t external_id,
+  size_t* num_dims,
+  size_t* dims);
+
+/// Reshape the XNNPACK runtime.
+///
+/// Propagates the shapes of input tensors through the graph to determine the shapes of intermediate and output
+/// tensors. Memory is allocated if required. Output tensor shapes are returned by xnn_get_external_value_shape.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
+enum xnn_status xnn_reshape_runtime(
+  xnn_runtime_t runtime);
+
+/// Deprecated. Use xnn_reshape_runtime and xnn_setup_runtime_v2.
+///
+/// Setup data pointers for external inputs and outputs in a Runtime object and
+/// allocate memory.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
+/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must
+///                              match the number of external inputs and outputs in the runtime, i.e. all external
+///                              inputs and outputs in the runtime must be specified in one call.
+/// @param external_values - array with location information for all external inputs and outputs in the runtime.
+enum xnn_status xnn_setup_runtime(
+  xnn_runtime_t runtime,
+  size_t num_external_values,
+  const struct xnn_external_value* external_values);
+
+/// Setup data pointers for external inputs and outputs in a Runtime object.
+/// Should be called after xnn_reshape_runtime.
+///
+/// @param runtime - a Runtime object created with @ref xnn_create_runtime or @ref xnn_create_runtime_v2.
+/// @param num_external_values - the number of external inputs and outputs specified in this call. This number must
+///                              match the number of external inputs and outputs in the runtime, i.e. all external
+///                              inputs and outputs in the runtime must be specified in one call.
+/// @param external_values - array with location information for all external inputs and outputs in the runtime.
+enum xnn_status xnn_setup_runtime_v2(
+  xnn_runtime_t runtime,
+  size_t num_external_values,
+  const struct xnn_external_value* external_values);
+
+/// Execute the forward pass for all operators in the runtime.
+///
+/// @param runtime - the Runtime object with the execution plan to invoke.
+enum xnn_status xnn_invoke_runtime(
+  xnn_runtime_t runtime);
+
+/// Destroy a Runtime object, as well as operators and memory associated with it.
+///
+/// @param runtime - the Runtime object to destroy.
+enum xnn_status xnn_delete_runtime(
+  xnn_runtime_t runtime);
+
+typedef struct xnn_operator* xnn_operator_t;
+
+enum xnn_status xnn_run_operator(
+  xnn_operator_t op,
+  pthreadpool_t threadpool);
+
+enum xnn_status xnn_delete_operator(
+  xnn_operator_t op);
+
+
+/// Operator API:
+/// - create operator will create and populate an xnn_operator_t
+/// - reshape operator will update fields in xnn_operator_t with shape/dimensions and parallelization information
+/// - setup operator will update pointers to inputs and outputs
+/// Each supported operator must have a create, reshape, and setup function. (Optionally a run function.)
+/// Operators listed below are in alphabetical order by operator name; within each operator, we sort alphabetically by
+/// data layout and type. We also group the create, reshape, setup (and optionally run) functions of each operator
+/// together.
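[Editorial usage sketch, not part of the header diff.] To illustrate the create, reshape, setup, run lifecycle described in the comment above, the minimal C sketch below sums two float vectors with the xnn_*_add_nd_f32 functions declared further down. It assumes the usual XNNPACK entry points declared earlier in this header (xnn_initialize, xnn_status_success), that the header is reachable as <xnnpack.h>, and that a NULL pthreadpool_t runs the computation on the calling thread; treat it as a sketch under those assumptions rather than canonical usage.

#include <math.h>     // INFINITY
#include <stddef.h>   // size_t
#include <xnnpack.h>  // assumed include path for this header

// Minimal sketch: element-wise sum of two length-n float vectors using the
// create -> reshape -> setup -> run operator lifecycle.
static enum xnn_status add_vectors_f32(const float* a, const float* b, float* sum, size_t n) {
  // One-time library initialization (declared earlier in this header).
  enum xnn_status status = xnn_initialize(/*allocator=*/NULL);
  if (status != xnn_status_success) return status;

  // Create: parameters that are fixed for the operator's lifetime (clamping bounds, flags).
  xnn_operator_t add_op = NULL;
  status = xnn_create_add_nd_f32(/*output_min=*/-INFINITY, /*output_max=*/INFINITY,
                                 /*flags=*/0, &add_op);
  if (status != xnn_status_success) return status;

  // Reshape: shape and parallelization information; both inputs are 1-D tensors of n elements.
  const size_t shape[1] = {n};
  status = xnn_reshape_add_nd_f32(add_op, /*num_input1_dims=*/1, shape,
                                  /*num_input2_dims=*/1, shape, /*threadpool=*/NULL);

  // Setup: attach the input and output data pointers.
  if (status == xnn_status_success) {
    status = xnn_setup_add_nd_f32(add_op, a, b, sum);
  }

  // Run on the calling thread (NULL threadpool), then release the operator.
  if (status == xnn_status_success) {
    status = xnn_run_operator(add_op, /*threadpool=*/NULL);
  }
  xnn_delete_operator(add_op);
  return status;
}

The same split applies to the operators declared below, while the single-shot xnn_run_* variants (such as xnn_run_add_nd_f32) take all of these arguments in one call and do not keep a persistent operator object.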
+ +enum xnn_status xnn_create_abs_nc_f16( + uint32_t flags, + xnn_operator_t* abs_op_out); + +enum xnn_status xnn_reshape_abs_nc_f16( + xnn_operator_t abs_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_abs_nc_f16( + xnn_operator_t abs_op, + const void* input, + void* output); + +enum xnn_status xnn_create_abs_nc_f32( + uint32_t flags, + xnn_operator_t* abs_op_out); + +enum xnn_status xnn_reshape_abs_nc_f32( + xnn_operator_t abs_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_abs_nc_f32( + xnn_operator_t abs_op, + const float* input, + float* output); + +enum xnn_status xnn_run_abs_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_f16( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_f16( + xnn_operator_t add_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_add_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_f32( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_f32( + xnn_operator_t add_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_add_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_qs8( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_qs8( + xnn_operator_t add_op, + const int8_t* input1, + const int8_t* input2, + int8_t* output); + +enum xnn_status xnn_run_add_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_add_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + 
xnn_operator_t* add_op_out); + +enum xnn_status xnn_reshape_add_nd_qu8( + xnn_operator_t add_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_add_nd_qu8( + xnn_operator_t add_op, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output); + +enum xnn_status xnn_run_add_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_argmax_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t flags, + xnn_operator_t* argmax_pooling_op_out); + +enum xnn_status xnn_reshape_argmax_pooling2d_nhwc_f32( + xnn_operator_t argmax_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_argmax_pooling2d_nhwc_f32( + xnn_operator_t argmax_pooling_op, + void* workspace, + const float* input, + float* output, + uint32_t* index); + +enum xnn_status xnn_create_average_pooling2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_reshape_average_pooling2d_nhwc_f16( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_f16( + xnn_operator_t average_pooling_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_average_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_reshape_average_pooling2d_nhwc_f32( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_f32( + xnn_operator_t average_pooling_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status 
xnn_create_average_pooling2d_nhwc_qu8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* average_pooling_op_out); + +enum xnn_status xnn_reshape_average_pooling2d_nhwc_qu8( + xnn_operator_t average_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_average_pooling2d_nhwc_qu8( + xnn_operator_t average_pooling_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_bankers_rounding_nc_f16( + uint32_t flags, + xnn_operator_t* rounding_op_out); + +enum xnn_status xnn_reshape_bankers_rounding_nc_f16( + xnn_operator_t rounding_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_bankers_rounding_nc_f16( + xnn_operator_t rounding_op, + const void* input, + void* output); + +enum xnn_status xnn_create_bankers_rounding_nc_f32( + uint32_t flags, + xnn_operator_t* rounding_op_out); + +enum xnn_status xnn_reshape_bankers_rounding_nc_f32( + xnn_operator_t rounding_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_bankers_rounding_nc_f32( + xnn_operator_t rounding_op, + const float* input, + float* output); + +enum xnn_status xnn_run_bankers_rounding_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_batch_matrix_multiply_nc_f16( + uint32_t flags, + xnn_operator_t* batch_matrix_multiply_op); + +enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f16( + xnn_operator_t batch_matrix_multiply_op, + size_t batch_size, + size_t m, + size_t k, + size_t n, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_batch_matrix_multiply_nc_f16( + xnn_operator_t batch_matrix_multiply_op, + void* workspace, + const void* lhs_input, + const void* rhs_input, + void* output); + +enum xnn_status xnn_create_batch_matrix_multiply_nc_f32( + uint32_t flags, + xnn_operator_t* batch_matrix_multiply_op); + +enum xnn_status xnn_reshape_batch_matrix_multiply_nc_f32( + xnn_operator_t batch_matrix_multiply_op, + size_t batch_size, + size_t m, + size_t k, + size_t n, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_batch_matrix_multiply_nc_f32( + xnn_operator_t batch_matrix_multiply_op, + void* workspace, + const float* lhs_input, + const float* rhs_input, + float* output); + +enum xnn_status xnn_create_ceiling_nc_f16( + uint32_t flags, + xnn_operator_t* ceiling_op_out); + +enum xnn_status xnn_reshape_ceiling_nc_f16( + xnn_operator_t ceiling_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_ceiling_nc_f16( + xnn_operator_t ceiling_op, + const void* input, + void* output); + +enum xnn_status xnn_create_ceiling_nc_f32( + uint32_t flags, + xnn_operator_t* ceiling_op_out); + +enum xnn_status xnn_run_ceiling_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_reshape_ceiling_nc_f32( + xnn_operator_t ceiling_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_ceiling_nc_f32( + xnn_operator_t ceiling_op, + const float* input, + float* output); + +enum xnn_status xnn_create_channel_shuffle_nc_x8( + size_t groups, + size_t group_channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* channel_shuffle_op_out); + +enum xnn_status xnn_reshape_channel_shuffle_nc_x8( + xnn_operator_t channel_shuffle_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_channel_shuffle_nc_x8( + xnn_operator_t channel_shuffle_op, + const void* input, + void* output); + +enum xnn_status xnn_create_channel_shuffle_nc_x32( + size_t groups, + size_t group_channels, + size_t input_stride, + size_t output_stride, + uint32_t flags, + xnn_operator_t* channel_shuffle_op_out); + +enum xnn_status xnn_reshape_channel_shuffle_nc_x32( + xnn_operator_t channel_shuffle_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_channel_shuffle_nc_x32( + xnn_operator_t channel_shuffle_op, + const void* input, + void* output); + +enum xnn_status xnn_create_clamp_nc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_f16( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_clamp_nc_f16( + xnn_operator_t clamp_op, + const void* input, + void* output); + +enum xnn_status xnn_create_clamp_nc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_f32( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_clamp_nc_f32( + xnn_operator_t clamp_op, + const float* input, + float* output); + +enum xnn_status xnn_run_clamp_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_clamp_nc_s8( + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_s8( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_clamp_nc_s8( + xnn_operator_t clamp_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_clamp_nc_u8( + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* clamp_op_out); + +enum xnn_status xnn_reshape_clamp_nc_u8( + xnn_operator_t clamp_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_clamp_nc_u8( + xnn_operator_t clamp_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_constant_pad_nd_x8( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_reshape_constant_pad_nd_x8( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_constant_pad_nd_x8( + xnn_operator_t constant_pad_op, + const void* input, + void* output); + +enum xnn_status xnn_run_constant_pad_nd_x8( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_constant_pad_nd_x16( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_reshape_constant_pad_nd_x16( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_constant_pad_nd_x16( + xnn_operator_t constant_pad_op, + const void* input, + void* output); + +enum xnn_status xnn_run_constant_pad_nd_x16( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_constant_pad_nd_x32( + const void* padding_value, + uint32_t flags, + xnn_operator_t* constant_pad_op_out); + +enum xnn_status xnn_reshape_constant_pad_nd_x32( + xnn_operator_t constant_pad_op, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_padding, + const size_t* post_padding, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_constant_pad_nd_x32( + xnn_operator_t constant_pad_op, + const void* input, + void* output); + +enum xnn_status xnn_run_constant_pad_nd_x32( + uint32_t flags, + size_t num_dims, + const size_t* input_shape, + const size_t* pre_paddings, + const size_t* post_paddings, + const void* input, + void* output, + const void* padding_value, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f16_f32( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f16_f32( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f16_f32( + xnn_operator_t convert_op, + const void* input, + float* output); + +enum xnn_status xnn_run_convert_nc_f16_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const void* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f16_qd8( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f16_qd8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +// quantization_params must be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries. 
+enum xnn_status xnn_setup_convert_nc_f16_qd8( + xnn_operator_t convert_op, + const void* input, + int8_t* output, + struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_create_convert_nc_f32_qd8( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_qd8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +// quantization_params must be padded with at least XNN_EXTRA_QUANTIZATION_PARAMS entries. +enum xnn_status xnn_setup_convert_nc_f32_qd8( + xnn_operator_t convert_op, + const float* input, + int8_t* output, + struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_create_convert_nc_f32_f16( + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_f16( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f32_f16( + xnn_operator_t convert_op, + const float* input, + void* output); + +enum xnn_status xnn_run_convert_nc_f32_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + void* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_qs8( + float output_scale, + int8_t output_zero_point, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_qs8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f32_qs8( + xnn_operator_t convert_op, + const float* input, + int8_t* output); + +enum xnn_status xnn_run_convert_nc_f32_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + int8_t* output, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_f32_qu8( + float output_scale, + uint8_t output_zero_point, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_f32_qu8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_f32_qu8( + xnn_operator_t convert_op, + const float* input, + uint8_t* output); + +enum xnn_status xnn_run_convert_nc_f32_qu8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + uint8_t* output, + float output_scale, + uint8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qs8( + float input_scale, + int8_t input_zero_point, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs8( + xnn_operator_t convert_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_convert_nc_qs8_f16( + float input_scale, + int8_t 
input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs8_f16( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs8_f16( + xnn_operator_t convert_op, + const int8_t* input, + void* output); + +enum xnn_status xnn_create_convert_nc_qs8_f32( + float input_scale, + int8_t input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs8_f32( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs8_f32( + xnn_operator_t convert_op, + const int8_t* input, + float* output); + +enum xnn_status xnn_run_convert_nc_qs8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const int8_t* input, + float* output, + float input_scale, + int8_t input_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qs16_qs8( + float input_scale, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qs16_qs8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qs16_qs8( + xnn_operator_t convert_op, + const int16_t* input, + int8_t* output); + +enum xnn_status xnn_run_convert_nc_qs16_qs8( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const int16_t* input, + int8_t* output, + float input_scale, + float output_scale, + int8_t output_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convert_nc_qu8( + float input_scale, + uint8_t input_zero_point, + float output_scale, + uint8_t output_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qu8( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qu8( + xnn_operator_t convert_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_convert_nc_qu8_f32( + float input_scale, + uint8_t input_zero_point, + uint32_t flags, + xnn_operator_t* convert_op_out); + +enum xnn_status xnn_reshape_convert_nc_qu8_f32( + xnn_operator_t convert_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convert_nc_qu8_f32( + xnn_operator_t convert_op, + const uint8_t* input, + float* output); + +enum xnn_status xnn_run_convert_nc_qu8_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const uint8_t* input, + float* output, + float input_scale, + uint8_t input_zero_point, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_convolution2d_nchw_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, 
+ size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nchw_f16( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nchw_f16( + xnn_operator_t convolution_op, + const void* input, + void* output); + +enum xnn_status xnn_create_convolution2d_nchw_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nchw_f32( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nchw_f32( + xnn_operator_t convolution_op, + const float* input, + float* output); + +enum xnn_status xnn_create_convolution2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_f16( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_f16( + xnn_operator_t convolution_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_convolution2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* 
convolution_op_out); + +// Forward declare. +struct xnn_post_operation; + +/// Create a convolution operator with a number of post operations. The +/// convolution operator created using this function does not have output_min +/// and output_max. The list of operators in post_operations will be applied in +/// order. Convolution with post operations is only supported on JIT platforms +/// and when JIT is enabled. +enum xnn_status xnn_create_fused_convolution2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + const float* kernel, + const float* bias, + size_t num_post_operations, + struct xnn_post_operation* post_operations, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_f32( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_f32( + xnn_operator_t convolution_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_convolution2d_nhwc_qd8_f16_qc8w( + uint32_t input_padding_top, uint32_t input_padding_right, + uint32_t input_padding_bottom, uint32_t input_padding_left, + uint32_t kernel_height, uint32_t kernel_width, uint32_t subsampling_height, + uint32_t subsampling_width, uint32_t dilation_height, + uint32_t dilation_width, uint32_t groups, size_t group_input_channels, + size_t group_output_channels, size_t input_channel_stride, + size_t output_channel_stride, const float* kernel_scale, + const int8_t* kernel, const float* bias, float output_min, float output_max, + uint32_t flags, xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_create_convolution2d_nhwc_qd8_f32_qc8w( + uint32_t input_padding_top, uint32_t input_padding_right, + uint32_t input_padding_bottom, uint32_t input_padding_left, + uint32_t kernel_height, uint32_t kernel_width, uint32_t subsampling_height, + uint32_t subsampling_width, uint32_t dilation_height, + uint32_t dilation_width, uint32_t groups, size_t group_input_channels, + size_t group_output_channels, size_t input_channel_stride, + size_t output_channel_stride, const float* kernel_scale, + const int8_t* kernel, const float* bias, float output_min, float output_max, + uint32_t flags, xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_create_convolution2d_nhwc_qs8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + int8_t input_zero_point, + float 
input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f16_qc8w( + xnn_operator_t convolution_op, size_t batch_size, size_t input_height, + size_t input_width, size_t* workspace_size, size_t* workspace_alignment, + size_t* output_height_out, size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qd8_f32_qc8w( + xnn_operator_t convolution_op, size_t batch_size, size_t input_height, + size_t input_width, size_t* workspace_size, size_t* workspace_alignment, + size_t* output_height_out, size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qs8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f16_qc8w( + xnn_operator_t convolution_op, void* workspace, const int8_t* input, + void* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_setup_convolution2d_nhwc_qd8_f32_qc8w( + xnn_operator_t convolution_op, void* workspace, const int8_t* input, + float* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_setup_convolution2d_nhwc_qs8( + xnn_operator_t convolution_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_convolution2d_nhwc_qs8_qc8w( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t output_channel_stride, + int8_t input_zero_point, + float input_scale, + const float* kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qs8_qc8w( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_qs8_qc8w( + xnn_operator_t convolution_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_convolution2d_nhwc_qu8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t subsampling_height, + uint32_t subsampling_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_channel_stride, + size_t 
output_channel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* convolution_op_out); + +enum xnn_status xnn_reshape_convolution2d_nhwc_qu8( + xnn_operator_t convolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* workspace_size, + size_t* workspace_alignment, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_convolution2d_nhwc_qu8( + xnn_operator_t convolution_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_copy_nc_x8( + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_reshape_copy_nc_x8( + xnn_operator_t copy_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_copy_nc_x8( + xnn_operator_t copy_op, + const void* input, + void* output); + +enum xnn_status xnn_create_copy_nc_x16( + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_reshape_copy_nc_x16( + xnn_operator_t copy_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_copy_nc_x16( + xnn_operator_t copy_op, + const void* input, + void* output); + +enum xnn_status xnn_create_copy_nc_x32( + uint32_t flags, + xnn_operator_t* copy_op_out); + +enum xnn_status xnn_reshape_copy_nc_x32( + xnn_operator_t copy_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_copy_nc_x32( + xnn_operator_t copy_op, + const void* input, + void* output); + +enum xnn_status xnn_run_copy_nc_x32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const uint32_t* input, + uint32_t* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_deconvolution2d_nhwc_f16( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_f16( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_f16( + xnn_operator_t deconvolution_op, + const void* input, + void* output); + +enum xnn_status xnn_create_deconvolution2d_nhwc_f32( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t 
kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_f32( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_f32( + xnn_operator_t deconvolution_op, + const float* input, + float* output); + +enum xnn_status xnn_create_deconvolution2d_nhwc_qs8( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_qs8( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_qs8( + xnn_operator_t deconvolution_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_deconvolution2d_nhwc_qu8( + uint32_t output_padding_top, + uint32_t output_padding_right, + uint32_t output_padding_bottom, + uint32_t output_padding_left, + uint32_t kernel_height, + uint32_t kernel_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* deconvolution_op_out); + +enum xnn_status xnn_reshape_deconvolution2d_nhwc_qu8( + xnn_operator_t deconvolution_op, + size_t batch_size, + size_t input_height, + size_t input_width, + uint32_t adjustment_height, + uint32_t adjustment_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_deconvolution2d_nhwc_qu8( + xnn_operator_t deconvolution_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x16( + uint32_t block_size, + uint32_t flags, + 
xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nchw2nhwc_x16( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x16( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nchw2nhwc_x32( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nchw2nhwc_x32( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nchw2nhwc_x32( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nhwc_x8( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nhwc_x8( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x8( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nhwc_x16( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nhwc_x16( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x16( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_depth_to_space_nhwc_x32( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* depth_to_space_op_out); + +enum xnn_status xnn_reshape_depth_to_space_nhwc_x32( + xnn_operator_t depth_to_space_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_depth_to_space_nhwc_x32( + xnn_operator_t depth_to_space_op, + const void* input, + void* output); + +enum xnn_status xnn_create_divide_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* divide_op_out); + +enum xnn_status xnn_reshape_divide_nd_f16( + xnn_operator_t divide_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_divide_nd_f16( + xnn_operator_t divide_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_divide_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* divide_op_out); + +enum xnn_status xnn_reshape_divide_nd_f32( + xnn_operator_t divide_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* 
input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_divide_nd_f32( + xnn_operator_t divide_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_divide_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_dynamic_fully_connected_nc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* dynamic_fully_connected_op_out); + +enum xnn_status xnn_reshape_dynamic_fully_connected_nc_f16( + xnn_operator_t dynamic_fully_connected_op, + size_t batch_size, + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_dynamic_fully_connected_nc_f16( + xnn_operator_t dynamic_fully_connected_op, + void* workspace, + const void* input, + const void* kernel, + const void* bias, + void* output); + +enum xnn_status xnn_create_dynamic_fully_connected_nc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* dynamic_fully_connected_op_out); + +enum xnn_status xnn_reshape_dynamic_fully_connected_nc_f32( + xnn_operator_t dynamic_fully_connected_op, + size_t batch_size, + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_dynamic_fully_connected_nc_f32( + xnn_operator_t dynamic_fully_connected_op, + void* workspace, + const float* input, + const float* kernel, + const float* bias, + float* output); + +enum xnn_status xnn_create_elu_nc_f16( + float alpha, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_reshape_elu_nc_f16( + xnn_operator_t elu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_elu_nc_f16( + xnn_operator_t elu_op, + const void* input, + void* output); + +enum xnn_status xnn_create_elu_nc_f32( + float alpha, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_reshape_elu_nc_f32( + xnn_operator_t elu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_elu_nc_f32( + xnn_operator_t elu_op, + const float* input, + float* output); + +enum xnn_status xnn_run_elu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float alpha, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_elu_nc_qs8( + float alpha, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* elu_op_out); + +enum xnn_status xnn_reshape_elu_nc_qs8( + xnn_operator_t elu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_elu_nc_qs8( + xnn_operator_t elu_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_floor_nc_f16( + uint32_t flags, + xnn_operator_t* floor_op_out); + +enum xnn_status 
xnn_reshape_floor_nc_f16( + xnn_operator_t floor_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_floor_nc_f16( + xnn_operator_t floor_op, + const void* input, + void* output); + +enum xnn_status xnn_create_floor_nc_f32( + uint32_t flags, + xnn_operator_t* floor_op_out); + +enum xnn_status xnn_reshape_floor_nc_f32( + xnn_operator_t floor_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_floor_nc_f32( + xnn_operator_t floor_op, + const float* input, + float* output); + +enum xnn_status xnn_run_floor_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_f16( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const void* kernel, + const void* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f16( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f16( + xnn_operator_t fully_connected_op, + const void* input, + void* output); + +enum xnn_status xnn_create_fully_connected_nc_f32( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f32( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f32( + xnn_operator_t fully_connected_op, + const float* input, + float* output); + +enum xnn_status xnn_create_fully_connected_nc_f32_qc4w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t kernel_zero_point, + const float* kernel_scale, + const uint8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f32_qc4w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f32_qc4w( + xnn_operator_t fully_connected_op, + const float* input, + float* output); + +enum xnn_status xnn_create_fully_connected_nc_f32_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel_scale, + const int8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_f32_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_f32_qc8w( + xnn_operator_t fully_connected_op, + const float* input, + float* output); + 
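The fully-connected declarations above follow the header's three-step operator lifecycle: create the operator once with its weights and clamping parameters, reshape it for a concrete batch size, then set up the input/output pointers before execution. The sketch below is a minimal, illustrative walk through xnn_create/reshape/setup_fully_connected_nc_f32 exactly as declared above; it assumes xnn_initialize, xnn_run_operator, and xnn_delete_operator are declared elsewhere in this header, that <xnnpack.h> is the include name, and that passing a NULL pthreadpool_t selects single-threaded execution.

#include <math.h>
#include <stddef.h>
#include <xnnpack.h>  // assumed name of the header containing the declarations above

int main(void) {
  // Assumed: the library must be initialized once before any operator is created.
  if (xnn_initialize(/*allocator=*/NULL) != xnn_status_success) return 1;

  enum { kBatch = 2, kInputChannels = 4, kOutputChannels = 3 };
  static const float kernel[kOutputChannels * kInputChannels] = {0};  // [output_channels, input_channels]
  static const float bias[kOutputChannels] = {0};
  static const float input[kBatch * kInputChannels] = {0};
  static float output[kBatch * kOutputChannels];

  // Step 1: create the operator with dense strides, no output clamping, and no caches.
  xnn_operator_t fc_op = NULL;
  if (xnn_create_fully_connected_nc_f32(
          kInputChannels, kOutputChannels,
          /*input_stride=*/kInputChannels, /*output_stride=*/kOutputChannels,
          kernel, bias,
          /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
          /*flags=*/0, /*code_cache=*/NULL, /*weights_cache=*/NULL,
          &fc_op) != xnn_status_success) return 1;

  // Step 2: reshape binds the batch size (NULL threadpool assumed to mean single-threaded).
  if (xnn_reshape_fully_connected_nc_f32(fc_op, kBatch, /*threadpool=*/NULL) != xnn_status_success) return 1;

  // Step 3: setup binds the input and output buffers.
  if (xnn_setup_fully_connected_nc_f32(fc_op, input, output) != xnn_status_success) return 1;

  // Assumed execution and cleanup entry points; they are not shown in this excerpt.
  xnn_run_operator(fc_op, /*threadpool=*/NULL);
  xnn_delete_operator(fc_op);
  return 0;
}

The half-precision and quantized fully-connected variants declared here follow the same create/reshape/setup shape, differing only in the zero-point, scale, and weight-type parameters taken at creation time.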
+enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qc4w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t kernel_zero_point, + const float* kernel_scale, + const void* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qc4w( + xnn_operator_t fully_connected_op, + const int8_t* input, + void* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qc4w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qc4w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t kernel_zero_point, + const float* kernel_scale, + const void* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qc4w( + xnn_operator_t fully_connected_op, + const int8_t* input, + float* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qc4w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qd8_f16_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel_scale, + const int8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f16_qc8w( + xnn_operator_t fully_connected_op, + const int8_t* input, + void* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f16_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qd8_f32_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + const float* kernel_scale, + const int8_t* kernel, + const float* bias, + float output_min, + float output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_setup_fully_connected_nc_qd8_f32_qc8w( + xnn_operator_t fully_connected_op, + const int8_t* input, + float* output, + const struct xnn_dynamic_quantization_params* quantization_params); + +enum xnn_status xnn_reshape_fully_connected_nc_qd8_f32_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_fully_connected_nc_qs8( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + 
xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_qs8( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_qs8( + xnn_operator_t fully_connected_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_fully_connected_nc_qs8_qc8w( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + const float* kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_qs8_qc8w( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_qs8_qc8w( + xnn_operator_t fully_connected_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_fully_connected_nc_qu8( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + uint8_t input_zero_point, + float input_scale, + uint8_t kernel_zero_point, + float kernel_scale, + const uint8_t* kernel, + const int32_t* bias, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* fully_connected_op_out); + +enum xnn_status xnn_reshape_fully_connected_nc_qu8( + xnn_operator_t fully_connected_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_fully_connected_nc_qu8( + xnn_operator_t fully_connected_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_global_average_pooling_ncw_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_ncw_f16( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_ncw_f16( + xnn_operator_t global_average_pooling_op, + const void* input, + void* output); + +enum xnn_status xnn_create_global_average_pooling_ncw_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_ncw_f32( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_ncw_f32( + xnn_operator_t global_average_pooling_op, + const float* input, + float* output); + +enum xnn_status xnn_create_global_average_pooling_nwc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_f16( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_f16( + xnn_operator_t global_average_pooling_op, + void* workspace, + const void* input, + void* output); + +enum 
xnn_status xnn_create_global_average_pooling_nwc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_f32( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_f32( + xnn_operator_t global_average_pooling_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_global_average_pooling_nwc_qs8( + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_qs8( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_qs8( + xnn_operator_t global_average_pooling_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_global_average_pooling_nwc_qu8( + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* global_average_pooling_op_out); + +enum xnn_status xnn_reshape_global_average_pooling_nwc_qu8( + xnn_operator_t global_average_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_average_pooling_nwc_qu8( + xnn_operator_t global_average_pooling_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_global_sum_pooling_nwc_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_sum_pooling_op_out); + +enum xnn_status xnn_reshape_global_sum_pooling_nwc_f16( + xnn_operator_t global_sum_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_sum_pooling_nwc_f16( + xnn_operator_t global_sum_pooling_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_global_sum_pooling_nwc_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* global_sum_pooling_op_out); + +enum xnn_status xnn_reshape_global_sum_pooling_nwc_f32( + xnn_operator_t global_sum_pooling_op, + size_t batch_size, + size_t width, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_global_sum_pooling_nwc_f32( + xnn_operator_t global_sum_pooling_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_hardswish_nc_f16( + uint32_t flags, + xnn_operator_t* hardswish_op_out); + +enum xnn_status xnn_reshape_hardswish_nc_f16( + xnn_operator_t hardswish_op, + size_t batch_size, + size_t channels, + size_t input_stride, + 
size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_hardswish_nc_f16( + xnn_operator_t hardswish_op, + const void* input, + void* output); + +enum xnn_status xnn_create_hardswish_nc_f32( + uint32_t flags, + xnn_operator_t* hardswish_op_out); + +enum xnn_status xnn_reshape_hardswish_nc_f32( + xnn_operator_t hardswish_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_hardswish_nc_f32( + xnn_operator_t hardswish_op, + const float* input, + float* output); + +enum xnn_status xnn_run_hardswish_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_f16( + float negative_slope, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_f16( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_f16( + xnn_operator_t leaky_relu_op, + const void* input, + void* output); + +enum xnn_status xnn_create_leaky_relu_nc_f32( + float negative_slope, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_f32( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_f32( + xnn_operator_t leaky_relu_op, + const float* input, + float* output); + +enum xnn_status xnn_run_leaky_relu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + float negative_slope, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_leaky_relu_nc_qs8( + float negative_slope, + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_qs8( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_qs8( + xnn_operator_t leaky_relu_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_leaky_relu_nc_qu8( + float negative_slope, + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* leaky_relu_op_out); + +enum xnn_status xnn_reshape_leaky_relu_nc_qu8( + xnn_operator_t leaky_relu_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_leaky_relu_nc_qu8( + xnn_operator_t leaky_relu_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_f16( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_f16( + xnn_operator_t 
max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_f16( + xnn_operator_t max_pooling_op, + const void* input, + void* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_f32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_f32( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_f32( + xnn_operator_t max_pooling_op, + const float* input, + float* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_s8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_s8( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_s8( + xnn_operator_t max_pooling_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_max_pooling2d_nhwc_u8( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + uint32_t stride_height, + uint32_t stride_width, + uint32_t dilation_height, + uint32_t dilation_width, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* max_pooling_op_out); + +enum xnn_status xnn_reshape_max_pooling2d_nhwc_u8( + xnn_operator_t max_pooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_max_pooling2d_nhwc_u8( + xnn_operator_t max_pooling_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_maximum_nd_f16( + uint32_t flags, + xnn_operator_t* maximum_op_out); + +enum xnn_status xnn_reshape_maximum_nd_f16( + xnn_operator_t maximum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_maximum_nd_f16( + xnn_operator_t maximum_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_maximum_nd_f32( + uint32_t flags, + xnn_operator_t* 
maximum_op_out); + +enum xnn_status xnn_reshape_maximum_nd_f32( + xnn_operator_t maximum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_maximum_nd_f32( + xnn_operator_t maximum_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_maximum_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_mean_nd_f16( + uint32_t flags, + xnn_operator_t* mean_op_out); + +enum xnn_status xnn_reshape_mean_nd_f16( + xnn_operator_t mean_op, + size_t num_reduction_axes, + const size_t* reduction_axes, + size_t num_input_dims, + const size_t* input_shape, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_mean_nd_f16( + xnn_operator_t mean_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_mean_nd_f32( + uint32_t flags, + xnn_operator_t* mean_op_out); + +enum xnn_status xnn_reshape_mean_nd_f32( + xnn_operator_t mean_op, + size_t num_reduction_axes, + const size_t* reduction_axes, + size_t num_input_dims, + const size_t* input_shape, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_mean_nd_f32( + xnn_operator_t mean_op, + void* workspace, + const float* input, + float* output); + +enum xnn_status xnn_create_minimum_nd_f16( + uint32_t flags, + xnn_operator_t* minimum_op_out); + +enum xnn_status xnn_reshape_minimum_nd_f16( + xnn_operator_t minimum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_minimum_nd_f16( + xnn_operator_t minimum_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_minimum_nd_f32( + uint32_t flags, + xnn_operator_t* minimum_op_out); + +enum xnn_status xnn_reshape_minimum_nd_f32( + xnn_operator_t minimum_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_minimum_nd_f32( + xnn_operator_t minimum_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_minimum_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_f16( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_f16( + xnn_operator_t multiply_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_multiply_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_f32( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* 
input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_f32( + xnn_operator_t multiply_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_multiply_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_qs8( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_qs8( + xnn_operator_t multiply_op, + const int8_t* input1, + const int8_t* input2, + int8_t* output); + +enum xnn_status xnn_run_multiply_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_multiply_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* multiply_op_out); + +enum xnn_status xnn_reshape_multiply_nd_qu8( + xnn_operator_t multiply_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_multiply_nd_qu8( + xnn_operator_t multiply_op, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output); + +enum xnn_status xnn_run_multiply_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_negate_nc_f16( + uint32_t flags, + xnn_operator_t* negate_op_out); + +enum xnn_status xnn_reshape_negate_nc_f16( + xnn_operator_t negate_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_negate_nc_f16( + xnn_operator_t negate_op, + const void* input, + void* output); + +enum xnn_status xnn_create_negate_nc_f32( + uint32_t flags, + xnn_operator_t* negate_op_out); + +enum xnn_status xnn_reshape_negate_nc_f32( + xnn_operator_t negate_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_negate_nc_f32( + xnn_operator_t negate_op, + 
const float* input, + float* output); + +enum xnn_status xnn_run_negate_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_prelu_nc_f16( + size_t channels, + size_t input_stride, + size_t output_stride, + const void* negative_slope, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* prelu_op_out); + +enum xnn_status xnn_reshape_prelu_nc_f16( + xnn_operator_t prelu_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_prelu_nc_f16( + xnn_operator_t prelu_op, + const void* input, + void* output); + +enum xnn_status xnn_create_prelu_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + const float* negative_slope, + uint32_t flags, + xnn_code_cache_t code_cache, + xnn_weights_cache_t weights_cache, + xnn_operator_t* prelu_op_out); + +enum xnn_status xnn_reshape_prelu_nc_f32( + xnn_operator_t prelu_op, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_prelu_nc_f32( + xnn_operator_t prelu_op, + const float* input, + float* output); + +enum xnn_status xnn_create_resize_bilinear2d_nchw_f32( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f32( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nchw_f32( + xnn_operator_t resize_op, + const float* input, + float* output); + +enum xnn_status xnn_create_resize_bilinear2d_nchw_f16( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nchw_f16( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nchw_f16( + xnn_operator_t resize_op, + const void* input, + void* output); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_f16( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f16( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f16( + xnn_operator_t resize_op, + void* workspace, + const void* input, + void* output); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_f32( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_f32( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_f32( + xnn_operator_t resize_op, + void* workspace, + const float* input, + float* output); + 
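Several reshape functions above (resize, the nwc global pooling operators, mean) additionally report a scratch-buffer requirement through workspace_size/workspace_alignment out-parameters, and the matching setup call then takes a caller-allocated workspace pointer. The sketch below illustrates that handshake for xnn_*_resize_bilinear2d_nhwc_f32 as declared above, under the same assumptions as the earlier example (xnn_initialize, xnn_run_operator, xnn_delete_operator, NULL threadpool), plus C11 aligned_alloc for the workspace buffer.

#include <stdlib.h>
#include <xnnpack.h>  // assumed header name for the declarations above

int main(void) {
  if (xnn_initialize(/*allocator=*/NULL) != xnn_status_success) return 1;  // assumed one-time init

  enum { kBatch = 1, kInH = 8, kInW = 8, kOutH = 16, kOutW = 16, kChannels = 3 };
  static const float input[kBatch * kInH * kInW * kChannels] = {0};  // NHWC layout
  static float output[kBatch * kOutH * kOutW * kChannels];

  xnn_operator_t resize_op = NULL;
  if (xnn_create_resize_bilinear2d_nhwc_f32(kOutH, kOutW, /*flags=*/0, &resize_op) != xnn_status_success)
    return 1;

  // Reshape reports how much scratch memory this configuration needs and how it must be aligned.
  size_t workspace_size = 0, workspace_alignment = 0;
  if (xnn_reshape_resize_bilinear2d_nhwc_f32(
          resize_op, kBatch, kInH, kInW, kChannels,
          /*input_pixel_stride=*/kChannels, /*output_pixel_stride=*/kChannels,
          &workspace_size, &workspace_alignment, /*threadpool=*/NULL) != xnn_status_success)
    return 1;

  // Allocate the workspace; aligned_alloc requires the size to be a multiple of the alignment.
  void* workspace = NULL;
  if (workspace_size != 0) {
    const size_t align = workspace_alignment != 0 ? workspace_alignment : sizeof(void*);
    const size_t rounded = (workspace_size + align - 1) / align * align;
    workspace = aligned_alloc(align, rounded);
    if (workspace == NULL) return 1;
  }

  // Setup binds the workspace and the I/O buffers; execution and cleanup are assumed as before.
  if (xnn_setup_resize_bilinear2d_nhwc_f32(resize_op, workspace, input, output) != xnn_status_success)
    return 1;
  xnn_run_operator(resize_op, /*threadpool=*/NULL);

  xnn_delete_operator(resize_op);
  free(workspace);
  return 0;
}

The same workspace handshake applies to the s8/u8 resize variants and to the nwc global average/sum pooling operators declared nearby.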
+enum xnn_status xnn_create_resize_bilinear2d_nhwc_s8( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_s8( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_s8( + xnn_operator_t resize_op, + void* workspace, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_resize_bilinear2d_nhwc_u8( + size_t output_height, + size_t output_width, + uint32_t flags, + xnn_operator_t* resize_op_out); + +enum xnn_status xnn_reshape_resize_bilinear2d_nhwc_u8( + xnn_operator_t resize_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_resize_bilinear2d_nhwc_u8( + xnn_operator_t resize_op, + void* workspace, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_rope_nthc_f16( + size_t max_tokens, + uint32_t flags, + xnn_operator_t* rope_op_out); + +enum xnn_status xnn_reshape_rope_nthc_f16( + xnn_operator_t rope_op, + size_t batch_size, + size_t tokens, + size_t heads, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_rope_nthc_f16( + xnn_operator_t rope_op, + const void* input, + const void* weights, + void* output); + +enum xnn_status xnn_create_rope_nthc_f32( + size_t max_tokens, + uint32_t flags, + xnn_operator_t* rope_op_out); + +enum xnn_status xnn_reshape_rope_nthc_f32( + xnn_operator_t rope_op, + size_t batch_size, + size_t tokens, + size_t heads, + size_t channels, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_rope_nthc_f32( + xnn_operator_t rope_op, + const float* input, + const float* weights, + float* output); + +// N: batch size +// H: number of heads +// T: tokens (sequence length) +// C: channels (head dimension) +enum xnn_status xnn_create_scaled_dot_product_attention_nhtc_f16( + enum xnn_attention_logits_cap_type cap_type, + const void* cap_params, + uint32_t flags, + xnn_operator_t* attention_op_out); + +enum xnn_status xnn_reshape_scaled_dot_product_attention_nhtc_f16( + xnn_operator_t attention_op, + size_t batch_size, + size_t query_heads, + // Number of tokens in query. + size_t query_tokens, + size_t key_value_heads, + // Number of tokens in key/value. For self-attention, this is same as tokens. + size_t key_value_tokens, + size_t query_key_channels, + size_t value_channels, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +// Query is of dimension [batch_size, query_heads, query_tokens, channels]. +// Key and value are of dimension [batch_size, key_value_heads, key_value_tokens, channels]. +// Scale is of dimension [channels]. +// Mask is of dimension [query_tokens, key_value_tokens]. 
+enum xnn_status xnn_setup_scaled_dot_product_attention_nhtc_f16( + xnn_operator_t attention_op, + void* workspace, + const void* query, + const void* key, + const void* value, + const void* scale, + const void* mask, + void* output); + +// N: batch size +// H: number of heads +// T: tokens (sequence length) +// C: channels (head dimension) +enum xnn_status xnn_create_scaled_dot_product_attention_nhtc_f32( + enum xnn_attention_logits_cap_type cap_type, + const void* cap_params, + uint32_t flags, + xnn_operator_t* attention_op_out); + +enum xnn_status xnn_reshape_scaled_dot_product_attention_nhtc_f32( + xnn_operator_t attention_op, + size_t batch_size, + size_t query_heads, + // Number of tokens in query. + size_t query_tokens, + size_t key_value_heads, + // Number of tokens in key/value. For self-attention, this is same as tokens. + size_t key_value_tokens, + size_t query_key_channels, + size_t value_channels, + size_t* workspace_size, + size_t* workspace_alignment, + pthreadpool_t threadpool); + +// Query is of dimension [batch_size, query_heads, query_tokens, query_key_channels]. +// Key and value are of dimension [batch_size, key_value_heads, key_value_tokens, query_key_channels]. +// Scale is of dimension [query_key_channels]. +// Mask is of dimension [query_tokens, key_value_tokens]. +// Output is of dimension [batch_size, query_heads, query_tokens, value_channels]. +enum xnn_status xnn_setup_scaled_dot_product_attention_nhtc_f32( + xnn_operator_t attention_op, + void* workspace, + const float* query, + const float* key, + const float* value, + const float* scale, + const float* mask, + float* output); + +enum xnn_status xnn_create_sigmoid_nc_f16( + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_f16( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_f16( + xnn_operator_t sigmoid_op, + const void* input, + void* output); + +enum xnn_status xnn_create_sigmoid_nc_f32( + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_f32( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_f32( + xnn_operator_t sigmoid_op, + const float* input, + float* output); + +enum xnn_status xnn_run_sigmoid_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_sigmoid_nc_qs8( + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_qs8( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_qs8( + xnn_operator_t sigmoid_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_sigmoid_nc_qu8( + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* sigmoid_op_out); + +enum xnn_status xnn_reshape_sigmoid_nc_qu8( + xnn_operator_t sigmoid_op, + size_t batch_size, + size_t 
channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_sigmoid_nc_qu8( + xnn_operator_t sigmoid_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_slice_nd_x16( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_reshape_slice_nd_x16( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_slice_nd_x16( + xnn_operator_t slice_op, + const void* input, + void* output); + +enum xnn_status xnn_create_slice_nd_x32( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_reshape_slice_nd_x32( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_slice_nd_x32( + xnn_operator_t slice_op, + const void* input, + void* output); + +enum xnn_status xnn_run_slice_nd_x32( + size_t num_dims, + const size_t* input_shape, + const size_t* offsets, + const size_t* sizes, + const void* input, + void* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_softmax_nc_f16( + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_reshape_softmax_nc_f16( + xnn_operator_t softmax_op, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_softmax_nc_f16( + xnn_operator_t softmax_op, + const void* input, + void* output); + +enum xnn_status xnn_create_softmax_nc_f32( + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_reshape_softmax_nc_f32( + xnn_operator_t softmax_op, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_softmax_nc_f32( + xnn_operator_t softmax_op, + const float* input, + float* output); + +enum xnn_status xnn_create_softmax_nc_qu8( + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint32_t flags, + xnn_operator_t* softmax_op_out); + +enum xnn_status xnn_reshape_softmax_nc_qu8( + xnn_operator_t softmax_op, + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_softmax_nc_qu8( + xnn_operator_t softmax_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_space_to_depth_nhwc_x16( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_reshape_space_to_depth_nhwc_x16( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x16( + xnn_operator_t space_to_depth_op, + const void* input, + void* output); + +enum xnn_status xnn_create_space_to_depth_nhwc_x32( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_reshape_space_to_depth_nhwc_x32( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_space_to_depth_nhwc_x32( + xnn_operator_t space_to_depth_op, + const void* input, + void* output); + +enum xnn_status xnn_create_square_nc_f16( + uint32_t flags, + xnn_operator_t* square_op_out); + +enum xnn_status xnn_reshape_square_nc_f16( + xnn_operator_t square_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_nc_f16( + xnn_operator_t square_op, + const void* input, + void* output); + +enum xnn_status xnn_create_square_nc_f32( + uint32_t flags, + xnn_operator_t* square_op_out); + +enum xnn_status xnn_reshape_square_nc_f32( + xnn_operator_t square_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_nc_f32( + xnn_operator_t square_op, + const float* input, + float* output); + +enum xnn_status xnn_run_square_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_square_root_nc_f16( + uint32_t flags, + xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_reshape_square_root_nc_f16( + xnn_operator_t sqrt_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_root_nc_f16( + xnn_operator_t sqrt_op, + const void* input, + void* output); + +enum xnn_status xnn_create_square_root_nc_f32( + uint32_t flags, + xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_reshape_square_root_nc_f32( + xnn_operator_t sqrt_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_square_root_nc_f32( + xnn_operator_t sqrt_op, + const float* input, + float* output); + +enum xnn_status xnn_run_square_root_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_reciprocal_square_root_nc_f32( + uint32_t flags, xnn_operator_t* sqrt_op_out); + +enum xnn_status xnn_reshape_reciprocal_square_root_nc_f32( + xnn_operator_t sqrt_op, size_t batch_size, size_t channels, + size_t input_stride, size_t output_stride, pthreadpool_t threadpool); + +enum xnn_status xnn_setup_reciprocal_square_root_nc_f32(xnn_operator_t sqrt_op, + const float* input, + float* output); + +enum xnn_status xnn_run_reciprocal_square_root_nc_f32( + size_t channels, size_t input_stride, size_t output_stride, + size_t batch_size, const float* input, float* output, uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_squared_difference_nd_f16( + uint32_t flags, + xnn_operator_t* squared_difference_op_out); + +enum xnn_status xnn_reshape_squared_difference_nd_f16( + xnn_operator_t squared_difference_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_squared_difference_nd_f16( + xnn_operator_t squared_difference_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_squared_difference_nd_f32( + uint32_t flags, + xnn_operator_t* squared_difference_op_out); + +enum xnn_status xnn_reshape_squared_difference_nd_f32( + xnn_operator_t squared_difference_op, + size_t 
num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_squared_difference_nd_f32( + xnn_operator_t squared_difference_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_squared_difference_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_f16( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_f16( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_f16( + xnn_operator_t subtract_op, + const void* input1, + const void* input2, + void* output); + +enum xnn_status xnn_create_subtract_nd_f32( + float output_min, + float output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_f32( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_f32( + xnn_operator_t subtract_op, + const float* input1, + const float* input2, + float* output); + +enum xnn_status xnn_run_subtract_nd_f32( + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + const float* input1, + const float* input2, + float* output, + float output_min, + float output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_qs8( + int8_t input1_zero_point, + float input1_scale, + int8_t input2_zero_point, + float input2_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_qs8( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_qs8( + xnn_operator_t subtract_op, + const int8_t* input1, + const int8_t* input2, + int8_t* output); + +enum xnn_status xnn_run_subtract_nd_qs8( + size_t num_input1_dims, + const size_t* input1_shape, + int8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + int8_t input2_zero_point, + float input2_scale, + const int8_t* input1, + const int8_t* input2, + int8_t* output, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_subtract_nd_qu8( + uint8_t input1_zero_point, + float input1_scale, + uint8_t input2_zero_point, + float input2_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* subtract_op_out); + +enum xnn_status xnn_reshape_subtract_nd_qu8( + xnn_operator_t subtract_op, + size_t num_input1_dims, + const size_t* input1_shape, + size_t num_input2_dims, + const size_t* input2_shape, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_subtract_nd_qu8( + 
xnn_operator_t subtract_op, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output); + +enum xnn_status xnn_run_subtract_nd_qu8( + size_t num_input1_dims, + const size_t* input1_shape, + uint8_t input1_zero_point, + float input1_scale, + size_t num_input2_dims, + const size_t* input2_shape, + uint8_t input2_zero_point, + float input2_scale, + const uint8_t* input1, + const uint8_t* input2, + uint8_t* output, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_tanh_nc_f16( + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_f16( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_f16( + xnn_operator_t tanh_op, + const void* input, + void* output); + +enum xnn_status xnn_create_tanh_nc_f32( + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_f32( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_f32( + xnn_operator_t tanh_op, + const float* input, + float* output); + +enum xnn_status xnn_run_tanh_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_tanh_nc_qs8( + int8_t input_zero_point, + float input_scale, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_qs8( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_qs8( + xnn_operator_t tanh_op, + const int8_t* input, + int8_t* output); + +enum xnn_status xnn_create_tanh_nc_qu8( + uint8_t input_zero_point, + float input_scale, + uint8_t output_zero_point, + float output_scale, + uint8_t output_min, + uint8_t output_max, + uint32_t flags, + xnn_operator_t* tanh_op_out); + +enum xnn_status xnn_reshape_tanh_nc_qu8( + xnn_operator_t tanh_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_tanh_nc_qu8( + xnn_operator_t tanh_op, + const uint8_t* input, + uint8_t* output); + +enum xnn_status xnn_create_transpose_nd_x8( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x8( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_transpose_nd_x8( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x8( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x16( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x16( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status 
xnn_setup_transpose_nd_x16( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x16( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x32( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x32( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_transpose_nd_x32( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x32( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_transpose_nd_x64( + uint32_t flags, + xnn_operator_t* transpose_op_out); + +enum xnn_status xnn_reshape_transpose_nd_x64( + xnn_operator_t transpose_op, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_transpose_nd_x64( + xnn_operator_t transpose_op, + const void* input, + void* output); + +enum xnn_status xnn_run_transpose_nd_x64( + const void* input, + void* output, + size_t num_dims, + const size_t* input_shape, + const size_t* output_perm, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_truncation_nc_f16( + uint32_t flags, + xnn_operator_t* truncation_op_out); + +enum xnn_status xnn_reshape_truncation_nc_f16( + xnn_operator_t truncation_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_truncation_nc_f16( + xnn_operator_t truncation_op, + const void* input, + void* output); + +enum xnn_status xnn_create_truncation_nc_f32( + uint32_t flags, + xnn_operator_t* truncation_op_out); + +enum xnn_status xnn_reshape_truncation_nc_f32( + xnn_operator_t truncation_op, + size_t batch_size, + size_t channels, + size_t input_stride, + size_t output_stride, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_truncation_nc_f32( + xnn_operator_t truncation_op, + const float* input, + float* output); + +enum xnn_status xnn_run_truncation_nc_f32( + size_t channels, + size_t input_stride, + size_t output_stride, + size_t batch_size, + const float* input, + float* output, + uint32_t flags, + pthreadpool_t threadpool); + +enum xnn_status xnn_create_unpooling2d_nhwc_x32( + uint32_t input_padding_top, + uint32_t input_padding_right, + uint32_t input_padding_bottom, + uint32_t input_padding_left, + uint32_t pooling_height, + uint32_t pooling_width, + size_t channels, + size_t input_pixel_stride, + size_t output_pixel_stride, + uint32_t flags, + xnn_operator_t* unpooling_op_out); + +enum xnn_status xnn_reshape_unpooling2d_nhwc_x32( + xnn_operator_t unpooling_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t* output_height_out, + size_t* output_width_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_unpooling2d_nhwc_x32( + xnn_operator_t unpooling_op, + const void* input, + const uint32_t* index, + void* output); + +enum xnn_status xnn_create_slice_nd_x8( + uint32_t flags, + xnn_operator_t* slice_op_out); + +enum xnn_status xnn_reshape_slice_nd_x8( + xnn_operator_t slice_op, + size_t num_dims, + const size_t* input_shape, + const 
size_t* offsets, + const size_t* sizes, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_slice_nd_x8( + xnn_operator_t slice_op, + const void* input, + void* output); + +enum xnn_status xnn_create_space_to_depth_nhwc_x8( + uint32_t block_size, + uint32_t flags, + xnn_operator_t* space_to_depth_op_out); + +enum xnn_status xnn_reshape_space_to_depth_nhwc_x8( + xnn_operator_t space_to_depth_op, + size_t batch_size, + size_t input_height, + size_t input_width, + size_t input_channels, + size_t* output_height_out, + size_t* output_width_out, + size_t* output_channels_out, + pthreadpool_t threadpool); + +enum xnn_status xnn_setup_space_to_depth_nhwc_x8( + xnn_operator_t space_to_depth_op, + const void* input, + void* output); + +#ifdef __cplusplus +} // extern "C" +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/ATen/ATenConfig.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/ATen/ATenConfig.cmake new file mode 100644 index 0000000000000000000000000000000000000000..0ce7803dbf78897298d81c2679f2cdb3c872bc15 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/ATen/ATenConfig.cmake @@ -0,0 +1,9 @@ +# Find the TH includes and library +# +# ATEN_INCLUDE_DIR -- where to find the includes +# ATEN_LIBRARIES -- list of libraries to link against +# ATEN_FOUND -- set to 1 if found + +set(ATEN_FOUND 1) +set(ATEN_INCLUDE_DIR "/pytorch/torch/include") +set(ATEN_LIBRARIES "") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDNN.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDNN.cmake new file mode 100644 index 0000000000000000000000000000000000000000..82134328c803dc87a89564638540a6cbcfa2d906 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDNN.cmake @@ -0,0 +1,78 @@ +# Find the CUDNN libraries +# +# The following variables are optionally searched for defaults +# CUDNN_ROOT: Base directory where CUDNN is found +# CUDNN_INCLUDE_DIR: Directory where CUDNN header is searched for +# CUDNN_LIBRARY: Directory where CUDNN library is searched for +# CUDNN_STATIC: Are we looking for a static library? (default: no) +# +# The following are set after configuration is done: +# CUDNN_FOUND +# CUDNN_INCLUDE_PATH +# CUDNN_LIBRARY_PATH +# + +include(FindPackageHandleStandardArgs) + +set(CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuDNN") +if (DEFINED $ENV{CUDNN_ROOT_DIR}) + message(WARNING "CUDNN_ROOT_DIR is deprecated. Please set CUDNN_ROOT instead.") +endif() +list(APPEND CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR}) + +# Compatible layer for CMake <3.12. CUDNN_ROOT will be accounted in for searching paths and libraries for CMake >=3.12. 
+list(APPEND CMAKE_PREFIX_PATH ${CUDNN_ROOT}) + +set(CUDNN_INCLUDE_DIR $ENV{CUDNN_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuDNN header files") + +find_path(CUDNN_INCLUDE_PATH cudnn.h + HINTS ${CUDNN_INCLUDE_DIR} + PATH_SUFFIXES cuda/include cuda include) + +option(CUDNN_STATIC "Look for static CUDNN" OFF) +if (CUDNN_STATIC) + set(CUDNN_LIBNAME "libcudnn_static.a") +else() + set(CUDNN_LIBNAME "cudnn") +endif() + +set(CUDNN_LIBRARY $ENV{CUDNN_LIBRARY} CACHE PATH "Path to the cudnn library file (e.g., libcudnn.so)") +if (CUDNN_LIBRARY MATCHES ".*cudnn_static.a" AND NOT CUDNN_STATIC) + message(WARNING "CUDNN_LIBRARY points to a static library (${CUDNN_LIBRARY}) but CUDNN_STATIC is OFF.") +endif() + +find_library(CUDNN_LIBRARY_PATH ${CUDNN_LIBNAME} + PATHS ${CUDNN_LIBRARY} + PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64) + +find_package_handle_standard_args(CUDNN DEFAULT_MSG CUDNN_LIBRARY_PATH CUDNN_INCLUDE_PATH) + +if(CUDNN_FOUND) + # Get cuDNN version + if(EXISTS ${CUDNN_INCLUDE_PATH}/cudnn_version.h) + file(READ ${CUDNN_INCLUDE_PATH}/cudnn_version.h CUDNN_HEADER_CONTENTS) + else() + file(READ ${CUDNN_INCLUDE_PATH}/cudnn.h CUDNN_HEADER_CONTENTS) + endif() + string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)" + CUDNN_VERSION_MAJOR "${CUDNN_HEADER_CONTENTS}") + string(REGEX REPLACE "define CUDNN_MAJOR * +([0-9]+)" "\\1" + CUDNN_VERSION_MAJOR "${CUDNN_VERSION_MAJOR}") + string(REGEX MATCH "define CUDNN_MINOR * +([0-9]+)" + CUDNN_VERSION_MINOR "${CUDNN_HEADER_CONTENTS}") + string(REGEX REPLACE "define CUDNN_MINOR * +([0-9]+)" "\\1" + CUDNN_VERSION_MINOR "${CUDNN_VERSION_MINOR}") + string(REGEX MATCH "define CUDNN_PATCHLEVEL * +([0-9]+)" + CUDNN_VERSION_PATCH "${CUDNN_HEADER_CONTENTS}") + string(REGEX REPLACE "define CUDNN_PATCHLEVEL * +([0-9]+)" "\\1" + CUDNN_VERSION_PATCH "${CUDNN_VERSION_PATCH}") + # Assemble cuDNN version + if(NOT CUDNN_VERSION_MAJOR) + set(CUDNN_VERSION "?") + else() + set(CUDNN_VERSION + "${CUDNN_VERSION_MAJOR}.${CUDNN_VERSION_MINOR}.${CUDNN_VERSION_PATCH}") + endif() +endif() + +mark_as_advanced(CUDNN_ROOT CUDNN_INCLUDE_DIR CUDNN_LIBRARY CUDNN_VERSION) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake new file mode 100644 index 0000000000000000000000000000000000000000..580f24a400d8c5662ec572c4631db9e3e47645d9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake @@ -0,0 +1,106 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# Copyright (c) 2007-2009 +# Scientific Computing and Imaging Institute, University of Utah +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. 
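The FindCUDNN module above exposes CUDNN_FOUND, CUDNN_INCLUDE_PATH, CUDNN_LIBRARY_PATH and CUDNN_VERSION once find_package_handle_standard_args succeeds. As a rough, minimal sketch of how a project might consume it directly (the target name, source file and module-path variable are hypothetical; normal libtorch builds reach cuDNN through the Caffe2 configs rather than calling this module themselves):

# Make the bundled find module visible, then locate cuDNN (TORCH_INSTALL_PREFIX is a hypothetical variable).
list(APPEND CMAKE_MODULE_PATH "${TORCH_INSTALL_PREFIX}/share/cmake/Caffe2/Modules_CUDA_fix")
find_package(CUDNN REQUIRED)
message(STATUS "Found cuDNN ${CUDNN_VERSION}: ${CUDNN_LIBRARY_PATH}")

add_executable(cudnn_probe main.cpp)                      # hypothetical target and source
target_include_directories(cudnn_probe PRIVATE ${CUDNN_INCLUDE_PATH})
target_link_libraries(cudnn_probe PRIVATE ${CUDNN_LIBRARY_PATH})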
+ +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# + +####################################################################### +# This converts a file written in makefile syntax into one that can be included +# by CMake. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Extra output +# +# input_file:FILEPATH=<> Path to dependency file in makefile format +# +# output_file:FILEPATH=<> Path to file with dependencies in CMake readable variable +# + +file(READ ${input_file} depend_text) + +if (NOT "${depend_text}" STREQUAL "") + + # message("FOUND DEPENDS") + + string(REPLACE "\\ " " " depend_text ${depend_text}) + + # This works for the nvcc -M generated dependency files. + string(REGEX REPLACE "^.* : " "" depend_text ${depend_text}) + string(REGEX REPLACE "[ \\\\]*\n" ";" depend_text ${depend_text}) + + set(dependency_list "") + + foreach(file ${depend_text}) + + string(REGEX REPLACE "^ +" "" file ${file}) + + # OK, now if we had a UNC path, nvcc has a tendency to only output the first '/' + # instead of '//'. Here we will test to see if the file exists, if it doesn't then + # try to prepend another '/' to the path and test again. If it still fails remove the + # path. + + if(NOT EXISTS "${file}") + if (EXISTS "/${file}") + set(file "/${file}") + else() + if(verbose) + message(WARNING " Removing non-existent dependency file: ${file}") + endif() + set(file "") + endif() + endif() + + # Make sure we check to see if we have a file, before asking if it is not a directory. + # if(NOT IS_DIRECTORY "") will return TRUE. + if(file AND NOT IS_DIRECTORY "${file}") + # If softlinks start to matter, we should change this to REALPATH. For now we need + # to flatten paths, because nvcc can generate stuff like /bin/../include instead of + # just /include. + get_filename_component(file_absolute "${file}" ABSOLUTE) + list(APPEND dependency_list "${file_absolute}") + endif() + + endforeach() + +else() + # message("FOUND NO DEPENDS") +endif() + +# Remove the duplicate entries and sort them. 
+list(REMOVE_DUPLICATES dependency_list) +list(SORT dependency_list) + +foreach(file ${dependency_list}) + string(APPEND cuda_nvcc_depend " \"${file}\"\n") +endforeach() + +file(WRITE ${output_file} "# Generated by: make2cmake.cmake\nSET(CUDA_NVCC_DEPEND\n ${cuda_nvcc_depend})\n\n") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake new file mode 100644 index 0000000000000000000000000000000000000000..25ceb49f3dd8e684e35cac49834c4db0aa5c338a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/parse_cubin.cmake @@ -0,0 +1,109 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# Copyright (c) 2007-2009 +# Scientific Computing and Imaging Institute, University of Utah +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# + +####################################################################### +# Parses a .cubin file produced by nvcc and reports statistics about the file. + + +file(READ ${input_file} file_text) + +if (NOT "${file_text}" STREQUAL "") + + string(REPLACE ";" "\\;" file_text ${file_text}) + string(REPLACE "\ncode" ";code" file_text ${file_text}) + + list(LENGTH file_text len) + + foreach(line ${file_text}) + + # Only look at "code { }" blocks. + if(line MATCHES "^code") + + # Break into individual lines. + string(REGEX REPLACE "\n" ";" line ${line}) + + foreach(entry ${line}) + + # Extract kernel names. + if (${entry} MATCHES "[^g]name = ([^ ]+)") + set(entry "${CMAKE_MATCH_1}") + + # Check to see if the kernel name starts with "_" + set(skip FALSE) + # if (${entry} MATCHES "^_") + # Skip the rest of this block. 
+ # message("Skipping ${entry}") + # set(skip TRUE) + # else () + message("Kernel: ${entry}") + # endif () + + endif() + + # Skip the rest of the block if necessary + if(NOT skip) + + # Registers + if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)") + set(entry "${CMAKE_MATCH_3}") + message("Registers: ${entry}") + endif() + + # Local memory + if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)") + set(entry "${CMAKE_MATCH_3}") + message("Local: ${entry}") + endif() + + # Shared memory + if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)") + set(entry "${CMAKE_MATCH_3}") + message("Shared: ${entry}") + endif() + + if (${entry} MATCHES "^}") + message("") + endif() + + endif() + + + endforeach() + + endif() + + endforeach() + +else() + # message("FOUND NO DEPENDS") +endif() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake new file mode 100644 index 0000000000000000000000000000000000000000..9293df3aafbdefdd8664ae2860d1b5b7fc9bfbfb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/run_nvcc.cmake @@ -0,0 +1,303 @@ +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +########################################################################## +# This file runs the nvcc commands to produce the desired output file along with +# the dependency file needed by CMake to compute dependencies. In addition the +# file checks the output of each command and if the command fails it deletes the +# output files. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Describe each step +# +# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or +# RelWithDebInfo, but it should match one of the +# entries in CUDA_HOST_FLAGS. This is the build +# configuration used when compiling the code. If +# blank or unspecified Debug is assumed as this is +# what CMake does. +# +# generated_file:STRING=<> File to generate. This argument must be passed in. 
+# +# generated_cubin_file:STRING=<> File to generate. This argument must be passed +# in if build_cubin is true. + +cmake_policy(PUSH) +cmake_policy(SET CMP0007 NEW) +cmake_policy(SET CMP0010 NEW) +if(NOT generated_file) + message(FATAL_ERROR "You must specify generated_file on the command line") +endif() + +# Set these up as variables to make reading the generated file easier +set(CMAKE_COMMAND "@CMAKE_COMMAND@") # path +set(source_file "@source_file@") # path +set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") # path +set(cmake_dependency_file "@cmake_dependency_file@") # path +set(CUDA_make2cmake "@CUDA_make2cmake@") # path +set(CUDA_parse_cubin "@CUDA_parse_cubin@") # path +set(build_cubin @build_cubin@) # bool +set(CUDA_HOST_COMPILER "@CUDA_HOST_COMPILER@") # path +# We won't actually use these variables for now, but we need to set this, in +# order to force this file to be run again if it changes. +set(generated_file_path "@generated_file_path@") # path +set(generated_file_internal "@generated_file@") # path +set(generated_cubin_file_internal "@generated_cubin_file@") # path + +set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") # path +set(CUDA_NVCC_FLAGS @CUDA_NVCC_FLAGS@ ;; @CUDA_WRAP_OPTION_NVCC_FLAGS@) # list +@CUDA_NVCC_FLAGS_CONFIG@ +set(nvcc_flags @nvcc_flags@) # list +set(CUDA_NVCC_INCLUDE_DIRS [==[@CUDA_NVCC_INCLUDE_DIRS@]==]) # list (needs to be in lua quotes to address backslashes) +string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}") +set(CUDA_NVCC_COMPILE_DEFINITIONS [==[@CUDA_NVCC_COMPILE_DEFINITIONS@]==]) # list (needs to be in lua quotes see #16510 ). +set(format_flag "@format_flag@") # string +set(cuda_language_flag @cuda_language_flag@) # list + +# Clean up list of include directories and add -I flags +list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS) +set(CUDA_NVCC_INCLUDE_ARGS) +foreach(dir ${CUDA_NVCC_INCLUDE_DIRS}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. + list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") +endforeach() + +# Clean up list of compile definitions, add -D flags, and append to nvcc_flags +list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS) +foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS}) + list(APPEND nvcc_flags "-D${def}") +endforeach() + +if(build_cubin AND NOT generated_cubin_file) + message(FATAL_ERROR "You must specify generated_cubin_file on the command line") +endif() + +# This is the list of host compilation flags. It C or CXX should already have +# been chosen by FindCUDA.cmake. +@CUDA_HOST_FLAGS@ + +# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler +set(nvcc_host_compiler_flags "") +# If we weren't given a build_configuration, use Debug. +if(NOT build_configuration) + set(build_configuration Debug) +endif() +string(TOUPPER "${build_configuration}" build_configuration) +#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") +foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. 
+ string(APPEND nvcc_host_compiler_flags ",\"${flag}\"") +endforeach() +if (nvcc_host_compiler_flags) + set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) +endif() +#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") +# Add the build specific configuration flags +list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) + +# Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority +list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 ) +list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 ) +if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) + if (CUDA_HOST_COMPILER STREQUAL "@_CUDA_MSVC_HOST_COMPILER@" AND DEFINED CCBIN) + set(CCBIN -ccbin "${CCBIN}") + else() + set(CCBIN -ccbin "${CUDA_HOST_COMPILER}") + endif() +endif() + +# cuda_execute_process - Executes a command with optional command echo and status message. +# +# status - Status message to print if verbose is true +# command - COMMAND argument from the usual execute_process argument structure +# ARGN - Remaining arguments are the command with arguments +# +# CUDA_result - return value from running the command +# +# Make this a macro instead of a function, so that things like RESULT_VARIABLE +# and other return variables are present after executing the process. +macro(cuda_execute_process status command) + set(_command ${command}) + if(NOT "x${_command}" STREQUAL "xCOMMAND") + message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") + endif() + if(verbose) + execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) + # Now we need to build up our command string. We are accounting for quotes + # and spaces, anything else is left up to the user to fix if they want to + # copy and paste a runnable command line. + set(cuda_execute_process_string) + foreach(arg ${ARGN}) + # If there are quotes, excape them, so they come through. + string(REPLACE "\"" "\\\"" arg ${arg}) + # Args with spaces need quotes around them to get them to be parsed as a single argument. + if(arg MATCHES " ") + list(APPEND cuda_execute_process_string "\"${arg}\"") + else() + list(APPEND cuda_execute_process_string ${arg}) + endif() + endforeach() + # Echo the command + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) + endif() + # Run the command + execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) +endmacro() + +# Delete the target file +cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" + ) + +# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag +# for dependency generation and hope for the best. +set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") +set(CUDA_VERSION @CUDA_VERSION@) + +# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This +# can cause incorrect dependencies when #including files based on this macro which is +# defined in the generating passes of nvcc invocation. We will go ahead and manually +# define this for now until a future version fixes this bug. 
+set(CUDACC_DEFINE -D__CUDACC__) + +# Generate the dependency file +cuda_execute_process( + "Generating dependency file: ${NVCC_generated_dependency_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + -M + ${CUDACC_DEFINE} + "${source_file}" + -o "${NVCC_generated_dependency_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${depends_CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the cmake readable dependency file to a temp file. Don't put the +# quotes just around the filenames for the input_file and output_file variables. +# CMake will pass the quotes through and not be able to find the file. +cuda_execute_process( + "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" + -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" + -D "verbose=${verbose}" + -P "${CUDA_make2cmake}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Copy the file if it is different +cuda_execute_process( + "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Delete the temporary file +cuda_execute_process( + "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the code +cuda_execute_process( + "Generating ${generated_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${cuda_language_flag} + ${format_flag} -o "${generated_file}" + ${CCBIN} + ${nvcc_flags} + ${nvcc_host_compiler_flags} + ${CUDA_NVCC_FLAGS} + -DNVCC + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + # Since nvcc can sometimes leave half done files make sure that we delete the output file. + cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" + ) + message(FATAL_ERROR "Error generating file ${generated_file}") +else() + if(verbose) + message("Generated ${generated_file} successfully.") + endif() +endif() + +# Cubin resource report commands. +if( build_cubin ) + # Run with -cubin to produce resource usage report. + cuda_execute_process( + "Generating ${generated_cubin_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -cubin + -o "${generated_cubin_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + + # Execute the parser script. 
+ cuda_execute_process( + "Executing the parser script" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:STRING=${generated_cubin_file}" + -P "${CUDA_parse_cubin}" + ) + +endif() + +cmake_policy(POP) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake new file mode 100644 index 0000000000000000000000000000000000000000..01692f6dcb9603f0d600d11e1e8631eb10c4d116 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake @@ -0,0 +1,280 @@ +# Synopsis: +# CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures]) +# -- Selects GPU arch flags for nvcc based on target_CUDA_architectures +# target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...) +# - "Auto" detects local machine GPU compute arch at runtime. +# - "Common" and "All" cover common and entire subsets of architectures +# ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX +# NAME: Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal Volta Turing Ampere +# NUM: Any number. Only those pairs are currently accepted by NVCC though: +# 3.5 3.7 5.0 5.2 5.3 6.0 6.2 7.0 7.2 7.5 8.0 +# Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable} +# Additionally, sets ${out_variable}_readable to the resulting numeric list +# Example: +# CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell) +# LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS}) +# +# More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA +# + +if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language + if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" + AND CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)") + set(CUDA_VERSION "${CMAKE_MATCH_1}") + endif() +endif() + +# See: https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list + +# This list will be used for CUDA_ARCH_NAME = All option +set(CUDA_KNOWN_GPU_ARCHITECTURES "Kepler" "Maxwell") + +# This list will be used for CUDA_ARCH_NAME = Common option (enabled by default) +set(CUDA_COMMON_GPU_ARCHITECTURES "3.5" "5.0") + +# This list is used to filter CUDA archs when autodetecting +set(CUDA_ALL_GPU_ARCHITECTURES "3.5" "5.0") + +if(CUDA_VERSION VERSION_GREATER "10.5") + list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ampere") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0") + list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.0") + + if(CUDA_VERSION VERSION_LESS "11.1") + set(CUDA_LIMIT_GPU_ARCHITECTURE "8.0") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0+PTX") + endif() +endif() + +if(NOT CUDA_VERSION VERSION_LESS "11.1") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6") + list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.6") + set(CUDA_LIMIT_GPU_ARCHITECUTRE "8.6") + + if(CUDA_VERSION VERSION_LESS "11.8") + set(CUDA_LIMIT_GPU_ARCHITECTURE "8.9") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6+PTX") + endif() +endif() + +if(NOT CUDA_VERSION VERSION_LESS "11.8") + list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ada") + list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Hopper") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0") + list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.9") + list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0") + + if(CUDA_VERSION VERSION_LESS "12.0") + set(CUDA_LIMIT_GPU_ARCHITECTURE "9.0") + list(APPEND 
CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0+PTX") + endif() +endif() + +if(NOT CUDA_VERSION VERSION_LESS "12.0") + list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0a") + list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0a") + list(REMOVE_ITEM CUDA_COMMON_GPU_ARCHITECTURES "3.5") + list(REMOVE_ITEM CUDA_ALL_GPU_ARCHITECTURES "3.5") +endif() + +################################################################################################ +# A function for automatic detection of GPUs installed (if autodetection is enabled) +# Usage: +# CUDA_DETECT_INSTALLED_GPUS(OUT_VARIABLE) +# +function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE) + if(NOT CUDA_GPU_DETECT_OUTPUT) + if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language + set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cu") + else() + set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cpp") + endif() + + file(WRITE ${file} "" + "#include \n" + "#include \n" + "int main()\n" + "{\n" + " int count = 0;\n" + " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n" + " if (count == 0) return -1;\n" + " for (int device = 0; device < count; ++device)\n" + " {\n" + " cudaDeviceProp prop;\n" + " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n" + " std::printf(\"%d.%d \", prop.major, prop.minor);\n" + " }\n" + " return 0;\n" + "}\n") + + if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language + try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file} + RUN_OUTPUT_VARIABLE compute_capabilities) + else() + try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file} + CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}" + LINK_LIBRARIES ${CUDA_LIBRARIES} + RUN_OUTPUT_VARIABLE compute_capabilities) + endif() + + # Filter unrelated content out of the output. + string(REGEX MATCHALL "[0-9]+\\.[0-9]+" compute_capabilities "${compute_capabilities}") + + if(run_result EQUAL 0) + string(REPLACE "2.1" "2.1(2.0)" compute_capabilities "${compute_capabilities}") + set(CUDA_GPU_DETECT_OUTPUT ${compute_capabilities} + CACHE INTERNAL "Returned GPU architectures from detect_gpus tool" FORCE) + endif() + endif() + + if(NOT CUDA_GPU_DETECT_OUTPUT) + message(STATUS "Automatic GPU detection failed. 
Building for common architectures.") + set(${OUT_VARIABLE} ${CUDA_COMMON_GPU_ARCHITECTURES} PARENT_SCOPE) + else() + # Filter based on CUDA version supported archs + set(CUDA_GPU_DETECT_OUTPUT_FILTERED "") + separate_arguments(CUDA_GPU_DETECT_OUTPUT) + foreach(ITEM IN ITEMS ${CUDA_GPU_DETECT_OUTPUT}) + if(CUDA_LIMIT_GPU_ARCHITECTURE AND (ITEM VERSION_GREATER CUDA_LIMIT_GPU_ARCHITECTURE OR + ITEM VERSION_EQUAL CUDA_LIMIT_GPU_ARCHITECTURE)) + list(GET CUDA_COMMON_GPU_ARCHITECTURES -1 NEWITEM) + string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${NEWITEM}") + else() + string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${ITEM}") + endif() + endforeach() + + set(${OUT_VARIABLE} ${CUDA_GPU_DETECT_OUTPUT_FILTERED} PARENT_SCOPE) + endif() +endfunction() + + +################################################################################################ +# Function for selecting GPU arch flags for nvcc based on CUDA architectures from parameter list +# Usage: +# SELECT_NVCC_ARCH_FLAGS(out_variable [list of CUDA compute archs]) +function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable) + set(CUDA_ARCH_LIST "${ARGN}") + + if("X${CUDA_ARCH_LIST}" STREQUAL "X" ) + set(CUDA_ARCH_LIST "Auto") + endif() + + set(cuda_arch_bin) + set(cuda_arch_ptx) + + if("${CUDA_ARCH_LIST}" STREQUAL "All") + set(CUDA_ARCH_LIST ${CUDA_KNOWN_GPU_ARCHITECTURES}) + elseif("${CUDA_ARCH_LIST}" STREQUAL "Common") + set(CUDA_ARCH_LIST ${CUDA_COMMON_GPU_ARCHITECTURES}) + elseif("${CUDA_ARCH_LIST}" STREQUAL "Auto") + CUDA_DETECT_INSTALLED_GPUS(CUDA_ARCH_LIST) + message(STATUS "Autodetected CUDA architecture(s): ${CUDA_ARCH_LIST}") + endif() + + # Now process the list and look for names + string(REGEX REPLACE "[ \t]+" ";" CUDA_ARCH_LIST "${CUDA_ARCH_LIST}") + list(REMOVE_DUPLICATES CUDA_ARCH_LIST) + foreach(arch_name ${CUDA_ARCH_LIST}) + set(arch_bin) + set(arch_ptx) + set(add_ptx FALSE) + # Check to see if we are compiling PTX + if(arch_name MATCHES "(.*)\\+PTX$") + set(add_ptx TRUE) + set(arch_name ${CMAKE_MATCH_1}) + endif() + if(arch_name MATCHES "^([0-9]\\.[0-9](\\([0-9]\\.[0-9]\\))?)$") + set(arch_bin ${CMAKE_MATCH_1}) + set(arch_ptx ${arch_bin}) + else() + # Look for it in our list of known architectures + if(${arch_name} STREQUAL "Kepler+Tesla") + set(arch_bin 3.7) + elseif(${arch_name} STREQUAL "Kepler") + set(arch_bin 3.5) + set(arch_ptx 3.5) + elseif(${arch_name} STREQUAL "Maxwell+Tegra") + set(arch_bin 5.3) + elseif(${arch_name} STREQUAL "Maxwell") + set(arch_bin 5.0 5.2) + set(arch_ptx 5.2) + elseif(${arch_name} STREQUAL "Pascal") + set(arch_bin 6.0 6.1) + set(arch_ptx 6.1) + elseif(${arch_name} STREQUAL "Volta+Tegra") + set(arch_bin 7.2) + elseif(${arch_name} STREQUAL "Volta") + set(arch_bin 7.0 7.0) + set(arch_ptx 7.0) + elseif(${arch_name} STREQUAL "Turing") + set(arch_bin 7.5) + set(arch_ptx 7.5) + elseif(${arch_name} STREQUAL "Ampere+Tegra") + set(arch_bin 8.7) + elseif(${arch_name} STREQUAL "Ampere") + set(arch_bin 8.0 8.6) + set(arch_ptx 8.0 8.6) + elseif(${arch_name} STREQUAL "Ada") + set(arch_bin 8.9) + set(arch_ptx 8.9) + elseif(${arch_name} STREQUAL "Hopper") + set(arch_bin 9.0) + set(arch_ptx 9.0) + else() + message(SEND_ERROR "Unknown CUDA Architecture Name ${arch_name} in CUDA_SELECT_NVCC_ARCH_FLAGS") + endif() + endif() + if(NOT arch_bin) + message(SEND_ERROR "arch_bin wasn't set for some reason") + endif() + list(APPEND cuda_arch_bin ${arch_bin}) + if(add_ptx) + if (NOT arch_ptx) + set(arch_ptx ${arch_bin}) + endif() + list(APPEND cuda_arch_ptx ${arch_ptx}) + endif() + endforeach() + + # remove dots and convert to 
lists + string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}") + string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}") + string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}") + string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}") + + if(cuda_arch_bin) + list(REMOVE_DUPLICATES cuda_arch_bin) + endif() + if(cuda_arch_ptx) + list(REMOVE_DUPLICATES cuda_arch_ptx) + endif() + + set(nvcc_flags "") + set(nvcc_archs_readable "") + + # Tell NVCC to add binaries for the specified GPUs + foreach(arch ${cuda_arch_bin}) + if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)") + # User explicitly specified ARCH for the concrete CODE + list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1}) + list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1}) + else() + # User didn't explicitly specify ARCH for the concrete CODE, we assume ARCH=CODE + list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch}) + list(APPEND nvcc_archs_readable sm_${arch}) + endif() + endforeach() + + # Tell NVCC to add PTX intermediate code for the specified architectures + foreach(arch ${cuda_arch_ptx}) + list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch}) + list(APPEND nvcc_archs_readable compute_${arch}) + endforeach() + + string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}") + set(${out_variable} ${nvcc_flags} PARENT_SCOPE) + set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE) +endfunction() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..2149086394b4b3d207d4d031db6448012ec11fdd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets-release.cmake @@ -0,0 +1,39 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. 
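To make the effect of CUDA_SELECT_NVCC_ARCH_FLAGS above concrete, here is a minimal usage sketch; the architecture arguments are illustrative, and the expected flags are inferred from the mapping tables in this module rather than verified against a particular toolkit:

CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS "Ampere" "7.5+PTX")
list(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
message(STATUS "Selected NVCC archs: ${ARCH_FLAGS_readable}")
# Per the tables above, "Ampere" expands to SASS for 8.0/8.6, and "7.5+PTX" adds PTX, i.e. roughly:
#   -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86
#   -gencode arch=compute_75,code=sm_75 -gencode arch=compute_75,code=compute_75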
+set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "tensorpipe_uv" for configuration "Release" +set_property(TARGET tensorpipe_uv APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tensorpipe_uv PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_uv ) +list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_uv "${_IMPORT_PREFIX}/lib64/libtensorpipe_uv.a" ) + +# Import target "tensorpipe" for configuration "Release" +set_property(TARGET tensorpipe APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tensorpipe PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS tensorpipe ) +list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe "${_IMPORT_PREFIX}/lib64/libtensorpipe.a" ) + +# Import target "tensorpipe_cuda" for configuration "Release" +set_property(TARGET tensorpipe_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tensorpipe_cuda PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS tensorpipe_cuda ) +list(APPEND _IMPORT_CHECK_FILES_FOR_tensorpipe_cuda "${_IMPORT_PREFIX}/lib64/libtensorpipe_cuda.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..31cc4794b7b83695f9bea33ffb48340cd5e89713 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Tensorpipe/TensorpipeTargets.cmake @@ -0,0 +1,114 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5) + message(FATAL_ERROR "CMake >= 2.6.0 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.6...3.17) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. +set(_targetsDefined) +set(_targetsNotDefined) +set(_expectedTargets) +foreach(_expectedTarget tensorpipe_uv tensorpipe tensorpipe_cuda) + list(APPEND _expectedTargets ${_expectedTarget}) + if(NOT TARGET ${_expectedTarget}) + list(APPEND _targetsNotDefined ${_expectedTarget}) + endif() + if(TARGET ${_expectedTarget}) + list(APPEND _targetsDefined ${_expectedTarget}) + endif() +endforeach() +if("${_targetsDefined}" STREQUAL "${_expectedTargets}") + unset(_targetsDefined) + unset(_targetsNotDefined) + unset(_expectedTargets) + set(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT "${_targetsDefined}" STREQUAL "") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n") +endif() +unset(_targetsDefined) +unset(_targetsNotDefined) +unset(_expectedTargets) + + +# Compute the installation prefix relative to this file. 
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target tensorpipe_uv +add_library(tensorpipe_uv STATIC IMPORTED) + +set_target_properties(tensorpipe_uv PROPERTIES + INTERFACE_LINK_LIBRARIES "\$;\$;\$" +) + +# Create imported target tensorpipe +add_library(tensorpipe STATIC IMPORTED) + +set_target_properties(tensorpipe PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "\$" +) + +# Create imported target tensorpipe_cuda +add_library(tensorpipe_cuda STATIC IMPORTED) + +set_target_properties(tensorpipe_cuda PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "/usr/local/cuda/include" + INTERFACE_LINK_LIBRARIES "tensorpipe;/usr/local/cuda/lib64/libcudart.so" +) + +if(CMAKE_VERSION VERSION_LESS 2.8.12) + message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.") +endif() + +# Load information for each installed configuration. +get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +file(GLOB CONFIG_FILES "${_DIR}/TensorpipeTargets-*.cmake") +foreach(f ${CONFIG_FILES}) + include(${f}) +endforeach() + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(target ${_IMPORT_CHECK_TARGETS} ) + foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} ) + if(NOT EXISTS "${file}" ) + message(FATAL_ERROR "The imported target \"${target}\" references the file + \"${file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_IMPORT_CHECK_FILES_FOR_${target}) +endforeach() +unset(_IMPORT_CHECK_TARGETS) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
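For orientation, the import script above has already created the static imported targets tensorpipe_uv, tensorpipe and tensorpipe_cuda, and the Release import file earlier points them at the libtensorpipe*.a archives under lib64. A hypothetical consumer could include the targets file and link those targets directly (the path variable, target and source names below are made up for illustration):

# Assumes TORCH_SHARE_CMAKE points at .../torch/share/cmake (hypothetical variable).
include("${TORCH_SHARE_CMAKE}/Tensorpipe/TensorpipeTargets.cmake")
add_executable(pipe_demo demo.cpp)                 # hypothetical target and source
target_link_libraries(pipe_demo PRIVATE tensorpipe)
# CUDA-aware channels would additionally link the tensorpipe_cuda target.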
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake new file mode 100644 index 0000000000000000000000000000000000000000..7e21324af8fd59eb018b4c4c696e53ab06a4a0a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Torch/TorchConfig.cmake @@ -0,0 +1,190 @@ +# FindTorch +# ------- +# +# Finds the Torch library +# +# This will define the following variables: +# +# TORCH_FOUND -- True if the system has the Torch library +# TORCH_INCLUDE_DIRS -- The include directories for torch +# TORCH_LIBRARIES -- Libraries to link against +# TORCH_CXX_FLAGS -- Additional (required) compiler flags +# +# and the following imported targets: +# +# torch +macro(append_torchlib_if_found) + foreach (_arg ${ARGN}) + find_library(${_arg}_LIBRARY ${_arg} PATHS "${TORCH_INSTALL_PREFIX}/lib") + if(${_arg}_LIBRARY) + list(APPEND TORCH_LIBRARIES ${${_arg}_LIBRARY}) + else() + message(WARNING "static library ${${_arg}_LIBRARY} not found.") + endif() + endforeach() +endmacro() + +macro(append_wholearchive_lib_if_found) + foreach (_arg ${ARGN}) + find_library(${_arg}_LIBRARY ${_arg} PATHS "${TORCH_INSTALL_PREFIX}/lib") + if(${_arg}_LIBRARY) + if(APPLE) + list(APPEND TORCH_LIBRARIES "-Wl,-force_load,${${_arg}_LIBRARY}") + elseif(MSVC) + list(APPEND TORCH_LIBRARIES "-WHOLEARCHIVE:${${_arg}_LIBRARY}") + else() + # Linux + list(APPEND TORCH_LIBRARIES "-Wl,--whole-archive ${${_arg}_LIBRARY} -Wl,--no-whole-archive") + endif() + else() + message(WARNING "static library ${${_arg}_LIBRARY} not found.") + endif() + endforeach() +endmacro() + +include(FindPackageHandleStandardArgs) + +if(DEFINED ENV{TORCH_INSTALL_PREFIX}) + set(TORCH_INSTALL_PREFIX $ENV{TORCH_INSTALL_PREFIX}) +else() + # Assume we are in /share/cmake/Torch/TorchConfig.cmake + get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) + get_filename_component(TORCH_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/../../../" ABSOLUTE) +endif() + +# Include directories. +if(EXISTS "${TORCH_INSTALL_PREFIX}/include") + set(TORCH_INCLUDE_DIRS + ${TORCH_INSTALL_PREFIX}/include + ${TORCH_INSTALL_PREFIX}/include/torch/csrc/api/include) +else() + set(TORCH_INCLUDE_DIRS + ${TORCH_INSTALL_PREFIX}/include + ${TORCH_INSTALL_PREFIX}/include/torch/csrc/api/include) +endif() + +# Library dependencies. +if(ON) + find_package(Caffe2 REQUIRED PATHS ${CMAKE_CURRENT_LIST_DIR}/../Caffe2) + set(TORCH_LIBRARIES torch ${Caffe2_MAIN_LIBS}) + append_torchlib_if_found(c10) +else() + add_library(torch STATIC IMPORTED) # set imported_location at the bottom + #library need whole archive + append_wholearchive_lib_if_found(torch torch_cpu) + if(ON) + append_wholearchive_lib_if_found(torch_cuda c10_cuda) + endif() + + # We need manually add dependent libraries when they are not linked into the + # shared library. + # TODO: this list might be incomplete. 
+ append_torchlib_if_found(c10) + if(OFF) + append_torchlib_if_found(Caffe2_perfkernels_avx512 Caffe2_perfkernels_avx2 Caffe2_perfkernels_avx) + endif() + + if(ON) + append_torchlib_if_found(nnpack) + endif() + + if(ON) + append_torchlib_if_found(pytorch_qnnpack) + endif() + + if(ON) + append_torchlib_if_found(qnnpack) + endif() + + if(ON) + append_torchlib_if_found(XNNPACK) + endif() + + append_torchlib_if_found(caffe2_protos protobuf-lite protobuf protoc) + append_torchlib_if_found(onnx onnx_proto) + + append_torchlib_if_found(foxi_loader fmt) + append_torchlib_if_found(cpuinfo clog) + + if(NOT OFF) + append_torchlib_if_found(pthreadpool) + endif() + + append_torchlib_if_found(eigen_blas) + + if(ON) + append_torchlib_if_found(fbgemm) + endif() + + if(ON) + append_torchlib_if_found(dnnl mkldnn) + endif() + + append_torchlib_if_found(sleef asmjit) +endif() + +if(1) + append_torchlib_if_found(kineto) +endif() + +if(ON) + if(MSVC) + if(NOT NVTOOLEXT_HOME) + set(NVTOOLEXT_HOME "C:/Program Files/NVIDIA Corporation/NvToolsExt") + endif() + if(DEFINED ENV{NVTOOLSEXT_PATH}) + set(NVTOOLEXT_HOME $ENV{NVTOOLSEXT_PATH}) + endif() + set(TORCH_CUDA_LIBRARIES + ${NVTOOLEXT_HOME}/lib/x64/nvToolsExt64_1.lib + ${CUDA_LIBRARIES}) + list(APPEND TORCH_INCLUDE_DIRS ${NVTOOLEXT_HOME}/include) + find_library(CAFFE2_NVRTC_LIBRARY caffe2_nvrtc PATHS "${TORCH_INSTALL_PREFIX}/lib") + list(APPEND TORCH_CUDA_LIBRARIES ${CAFFE2_NVRTC_LIBRARY}) + elseif(APPLE) + set(TORCH_CUDA_LIBRARIES + ${CUDA_TOOLKIT_ROOT_DIR}/lib/libcudart.dylib + ${CUDA_TOOLKIT_ROOT_DIR}/lib/libnvrtc.dylib + ${CUDA_TOOLKIT_ROOT_DIR}/lib/libnvToolsExt.dylib + ${CUDA_LIBRARIES}) + else() + find_library(LIBNVTOOLSEXT libnvToolsExt.so PATHS ${CUDA_TOOLKIT_ROOT_DIR}/lib64/) + set(TORCH_CUDA_LIBRARIES + ${CUDA_CUDA_LIB} + ${CUDA_NVRTC_LIB} + ${LIBNVTOOLSEXT} + ${CUDA_LIBRARIES}) + endif() + if(ON) + find_library(C10_CUDA_LIBRARY c10_cuda PATHS "${TORCH_INSTALL_PREFIX}/lib") + list(APPEND TORCH_CUDA_LIBRARIES ${C10_CUDA_LIBRARY}) + endif() + list(APPEND TORCH_LIBRARIES ${TORCH_CUDA_LIBRARIES}) +endif() + +# When we build libtorch with the old libstdc++ ABI, dependent libraries must too. 
+if(CMAKE_SYSTEM_NAME STREQUAL "Linux") + set(TORCH_CXX_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=0") +endif() + +find_library(TORCH_LIBRARY torch PATHS "${TORCH_INSTALL_PREFIX}/lib") +# the statements below changes target properties on +# - the imported target from Caffe2Targets.cmake in shared library mode (see the find_package above) +# - this is untested whether it is the correct (or desired) methodology in CMake +# - the imported target created in this file in static library mode +if(NOT ON) + # do not set this property on the shared library target, as it will cause confusion in some builds + # as the configuration specific property is set in the Caffe2Targets.cmake file + set_target_properties(torch PROPERTIES + IMPORTED_LOCATION "${TORCH_LIBRARY}" + ) +endif() +set_target_properties(torch PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${TORCH_INCLUDE_DIRS}" + CXX_STANDARD 17 +) +if(TORCH_CXX_FLAGS) + set_property(TARGET torch PROPERTY INTERFACE_COMPILE_OPTIONS "${TORCH_CXX_FLAGS}") +endif() + +find_package_handle_standard_args(Torch DEFAULT_MSG TORCH_LIBRARY TORCH_INCLUDE_DIRS) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Torch/TorchConfigVersion.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Torch/TorchConfigVersion.cmake new file mode 100644 index 0000000000000000000000000000000000000000..eb5652936cbc91b060f8ea26e683665f3901e06c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Torch/TorchConfigVersion.cmake @@ -0,0 +1,11 @@ +set(PACKAGE_VERSION "2.3.0") + +# Check whether the requested PACKAGE_FIND_VERSION is compatible +if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}") + set(PACKAGE_VERSION_EXACT TRUE) + endif() +endif()
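Taken together, TorchConfig.cmake and TorchConfigVersion.cmake above let a downstream project pick up libtorch 2.3.0 with an ordinary find_package call. A minimal consumer sketch follows (project, target and source names are hypothetical; it assumes CMAKE_PREFIX_PATH or Torch_DIR points at this share/cmake/Torch directory):

cmake_minimum_required(VERSION 3.18)
project(torch_consumer CXX)                        # hypothetical project

set(CMAKE_CXX_STANDARD 17)                         # the torch target above sets CXX_STANDARD 17
find_package(Torch 2.3 REQUIRED)                   # satisfied by PACKAGE_VERSION "2.3.0"

add_executable(torch_consumer main.cpp)            # hypothetical source
target_link_libraries(torch_consumer PRIVATE "${TORCH_LIBRARIES}")
# TORCH_CXX_FLAGS (e.g. -D_GLIBCXX_USE_CXX11_ABI=0 on Linux) propagates through the
# torch target's INTERFACE_COMPILE_OPTIONS, so no extra compile flags are usually needed.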