applied-ai-018 committed
Commit 3c0728c · verified · 1 Parent(s): a2ea2d4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/clog.h +108 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/cpuinfo.h +1956 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl.h +22 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_config.h +22 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_debug.h +22 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_ocl.h +22 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl.h +22 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h +22 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h +22 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_types.h +22 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_version.h +22 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/experiments-config.h +25 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/fp16.h +11 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/fxdiv.h +425 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/libshm.h +46 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/nnpack.h +659 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/psimd.h +1384 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/pthreadpool.h +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack.h +336 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack_func.h +166 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/sleef.h +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h +143 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h +30 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h +25 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h +7 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h +25 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h +8 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h +23 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h +19 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h +30 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h +26 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h +10 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h +445 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h +32 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h +425 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h +56 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h +763 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h +64 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h +11 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h +113 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h +7 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h +105 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h +44 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_fft_functions.h +7 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_function.h +160 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_hook.h +55 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_linalg_functions.h +7 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_saved_variable_hooks.h +33 -0
llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc ADDED
Binary file (444 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc ADDED
Binary file (1.1 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/include/clog.h ADDED
@@ -0,0 +1,108 @@
+ /*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+ #pragma once
+
+ #include <stdarg.h>
+ #include <stdlib.h>
+ #include <inttypes.h>
+
+ #define CLOG_NONE 0
+ #define CLOG_FATAL 1
+ #define CLOG_ERROR 2
+ #define CLOG_WARNING 3
+ #define CLOG_INFO 4
+ #define CLOG_DEBUG 5
+
+ #ifndef CLOG_VISIBILITY
+ #if defined(__ELF__)
+ #define CLOG_VISIBILITY __attribute__((__visibility__("internal")))
+ #elif defined(__MACH__)
+ #define CLOG_VISIBILITY __attribute__((__visibility__("hidden")))
+ #else
+ #define CLOG_VISIBILITY
+ #endif
+ #endif
+
+ #ifndef CLOG_ARGUMENTS_FORMAT
+ #if defined(__GNUC__)
+ #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2)))
+ #else
+ #define CLOG_ARGUMENTS_FORMAT
+ #endif
+ #endif
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args);
+ CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args);
+ CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args);
+ CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args);
+ CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args);
+
+ #define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \
+ CLOG_ARGUMENTS_FORMAT \
+ inline static void log_debug_function_name(const char* format, ...) { \
+ if (level >= CLOG_DEBUG) { \
+ va_list args; \
+ va_start(args, format); \
+ clog_vlog_debug(module, format, args); \
+ va_end(args); \
+ } \
+ }
+
+ #define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \
+ CLOG_ARGUMENTS_FORMAT \
+ inline static void log_info_function_name(const char* format, ...) { \
+ if (level >= CLOG_INFO) { \
+ va_list args; \
+ va_start(args, format); \
+ clog_vlog_info(module, format, args); \
+ va_end(args); \
+ } \
+ }
+
+ #define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \
+ CLOG_ARGUMENTS_FORMAT \
+ inline static void log_warning_function_name(const char* format, ...) { \
+ if (level >= CLOG_WARNING) { \
+ va_list args; \
+ va_start(args, format); \
+ clog_vlog_warning(module, format, args); \
+ va_end(args); \
+ } \
+ }
+
+ #define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \
+ CLOG_ARGUMENTS_FORMAT \
+ inline static void log_error_function_name(const char* format, ...) { \
+ if (level >= CLOG_ERROR) { \
+ va_list args; \
+ va_start(args, format); \
+ clog_vlog_error(module, format, args); \
+ va_end(args); \
+ } \
+ }
+
+ #define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \
+ CLOG_ARGUMENTS_FORMAT \
+ inline static void log_fatal_function_name(const char* format, ...) { \
+ if (level >= CLOG_FATAL) { \
+ va_list args; \
+ va_start(args, format); \
+ clog_vlog_fatal(module, format, args); \
+ va_end(args); \
+ } \
+ abort(); \
+ }
+
+ #ifdef __cplusplus
+ } /* extern "C" */
+ #endif
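
Aside: the CLOG_DEFINE_LOG_* macros in the header above expand to inline, printf-style wrapper functions that forward to the clog_vlog_* entry points, with the severity check done against a compile-time level. A minimal, hypothetical usage sketch follows; it is not part of this commit, assumes the program links against the clog library that implements clog_vlog_info and friends, and the module name "mymodule" and function names are illustrative only.

    #include <clog.h>

    /* Compile-time log level for this module (hypothetical choice). */
    #define MYMODULE_LOG_LEVEL CLOG_INFO

    /* Each macro defines an inline printf-style logging function for one severity. */
    CLOG_DEFINE_LOG_INFO(mymodule_log_info, "mymodule", MYMODULE_LOG_LEVEL)
    CLOG_DEFINE_LOG_DEBUG(mymodule_log_debug, "mymodule", MYMODULE_LOG_LEVEL)
    CLOG_DEFINE_LOG_ERROR(mymodule_log_error, "mymodule", MYMODULE_LOG_LEVEL)

    int main(void) {
        mymodule_log_info("initialized %d workers", 4);           /* CLOG_INFO >= CLOG_INFO: logged */
        mymodule_log_debug("verbose detail");                     /* CLOG_INFO < CLOG_DEBUG: condition is false, nothing is logged */
        mymodule_log_error("failed to open %s", "config.json");   /* CLOG_INFO >= CLOG_ERROR: logged */
        return 0;
    }

Because the level is a constant macro argument, calls below the configured severity reduce to a branch on a constant-false condition, so disabled messages cost essentially nothing at runtime.
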
llmeval-env/lib/python3.10/site-packages/torch/include/cpuinfo.h ADDED
@@ -0,0 +1,1956 @@
1
+ #pragma once
2
+ #ifndef CPUINFO_H
3
+ #define CPUINFO_H
4
+
5
+ #ifndef __cplusplus
6
+ #include <stdbool.h>
7
+ #endif
8
+
9
+ #ifdef __APPLE__
10
+ #include <TargetConditionals.h>
11
+ #endif
12
+
13
+ #include <stdint.h>
14
+
15
+ /* Identify architecture and define corresponding macro */
16
+
17
+ #if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86)
18
+ #define CPUINFO_ARCH_X86 1
19
+ #endif
20
+
21
+ #if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
22
+ #define CPUINFO_ARCH_X86_64 1
23
+ #endif
24
+
25
+ #if defined(__arm__) || defined(_M_ARM)
26
+ #define CPUINFO_ARCH_ARM 1
27
+ #endif
28
+
29
+ #if defined(__aarch64__) || defined(_M_ARM64)
30
+ #define CPUINFO_ARCH_ARM64 1
31
+ #endif
32
+
33
+ #if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
34
+ #define CPUINFO_ARCH_PPC64 1
35
+ #endif
36
+
37
+ #if defined(__asmjs__)
38
+ #define CPUINFO_ARCH_ASMJS 1
39
+ #endif
40
+
41
+ #if defined(__wasm__)
42
+ #if defined(__wasm_simd128__)
43
+ #define CPUINFO_ARCH_WASMSIMD 1
44
+ #else
45
+ #define CPUINFO_ARCH_WASM 1
46
+ #endif
47
+ #endif
48
+
49
+ /* Define other architecture-specific macros as 0 */
50
+
51
+ #ifndef CPUINFO_ARCH_X86
52
+ #define CPUINFO_ARCH_X86 0
53
+ #endif
54
+
55
+ #ifndef CPUINFO_ARCH_X86_64
56
+ #define CPUINFO_ARCH_X86_64 0
57
+ #endif
58
+
59
+ #ifndef CPUINFO_ARCH_ARM
60
+ #define CPUINFO_ARCH_ARM 0
61
+ #endif
62
+
63
+ #ifndef CPUINFO_ARCH_ARM64
64
+ #define CPUINFO_ARCH_ARM64 0
65
+ #endif
66
+
67
+ #ifndef CPUINFO_ARCH_PPC64
68
+ #define CPUINFO_ARCH_PPC64 0
69
+ #endif
70
+
71
+ #ifndef CPUINFO_ARCH_ASMJS
72
+ #define CPUINFO_ARCH_ASMJS 0
73
+ #endif
74
+
75
+ #ifndef CPUINFO_ARCH_WASM
76
+ #define CPUINFO_ARCH_WASM 0
77
+ #endif
78
+
79
+ #ifndef CPUINFO_ARCH_WASMSIMD
80
+ #define CPUINFO_ARCH_WASMSIMD 0
81
+ #endif
82
+
83
+ #if CPUINFO_ARCH_X86 && defined(_MSC_VER)
84
+ #define CPUINFO_ABI __cdecl
85
+ #elif CPUINFO_ARCH_X86 && defined(__GNUC__)
86
+ #define CPUINFO_ABI __attribute__((__cdecl__))
87
+ #else
88
+ #define CPUINFO_ABI
89
+ #endif
90
+
91
+ #define CPUINFO_CACHE_UNIFIED 0x00000001
92
+ #define CPUINFO_CACHE_INCLUSIVE 0x00000002
93
+ #define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004
94
+
95
+ struct cpuinfo_cache {
96
+ /** Cache size in bytes */
97
+ uint32_t size;
98
+ /** Number of ways of associativity */
99
+ uint32_t associativity;
100
+ /** Number of sets */
101
+ uint32_t sets;
102
+ /** Number of partitions */
103
+ uint32_t partitions;
104
+ /** Line size in bytes */
105
+ uint32_t line_size;
106
+ /**
107
+ * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing).
108
+ *
109
+ * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING
110
+ */
111
+ uint32_t flags;
112
+ /** Index of the first logical processor that shares this cache */
113
+ uint32_t processor_start;
114
+ /** Number of logical processors that share this cache */
115
+ uint32_t processor_count;
116
+ };
117
+
118
+ struct cpuinfo_trace_cache {
119
+ uint32_t uops;
120
+ uint32_t associativity;
121
+ };
122
+
123
+ #define CPUINFO_PAGE_SIZE_4KB 0x1000
124
+ #define CPUINFO_PAGE_SIZE_1MB 0x100000
125
+ #define CPUINFO_PAGE_SIZE_2MB 0x200000
126
+ #define CPUINFO_PAGE_SIZE_4MB 0x400000
127
+ #define CPUINFO_PAGE_SIZE_16MB 0x1000000
128
+ #define CPUINFO_PAGE_SIZE_1GB 0x40000000
129
+
130
+ struct cpuinfo_tlb {
131
+ uint32_t entries;
132
+ uint32_t associativity;
133
+ uint64_t pages;
134
+ };
135
+
136
+ /** Vendor of processor core design */
137
+ enum cpuinfo_vendor {
138
+ /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */
139
+ cpuinfo_vendor_unknown = 0,
140
+
141
+ /* Active vendors of modern CPUs */
142
+
143
+ /**
144
+ * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures.
145
+ *
146
+ * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004.
147
+ */
148
+ cpuinfo_vendor_intel = 1,
149
+ /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */
150
+ cpuinfo_vendor_amd = 2,
151
+ /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */
152
+ cpuinfo_vendor_arm = 3,
153
+ /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */
154
+ cpuinfo_vendor_qualcomm = 4,
155
+ /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */
156
+ cpuinfo_vendor_apple = 5,
157
+ /** Samsung Electronics Co., Ltd. Vendor of ARM64 processor microarchitectures. */
158
+ cpuinfo_vendor_samsung = 6,
159
+ /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */
160
+ cpuinfo_vendor_nvidia = 7,
161
+ /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */
162
+ cpuinfo_vendor_mips = 8,
163
+ /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */
164
+ cpuinfo_vendor_ibm = 9,
165
+ /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */
166
+ cpuinfo_vendor_ingenic = 10,
167
+ /**
168
+ * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures.
169
+ *
170
+ * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies.
171
+ */
172
+ cpuinfo_vendor_via = 11,
173
+ /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */
174
+ cpuinfo_vendor_cavium = 12,
175
+ /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */
176
+ cpuinfo_vendor_broadcom = 13,
177
+ /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */
178
+ cpuinfo_vendor_apm = 14,
179
+ /**
180
+ * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures.
181
+ *
182
+ * Processors are designed by HiSilicon, a subsidiary of Huawei.
183
+ */
184
+ cpuinfo_vendor_huawei = 15,
185
+ /**
186
+ * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor of x86-64 processor microarchitectures.
187
+ *
188
+ * Processors are variants of AMD cores.
189
+ */
190
+ cpuinfo_vendor_hygon = 16,
191
+
192
+ /* Active vendors of embedded CPUs */
193
+
194
+ /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */
195
+ cpuinfo_vendor_texas_instruments = 30,
196
+ /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */
197
+ cpuinfo_vendor_marvell = 31,
198
+ /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */
199
+ cpuinfo_vendor_rdc = 32,
200
+ /** DM&P Electronics Inc. Vendor of x86 processor microarchitectures. */
201
+ cpuinfo_vendor_dmp = 33,
202
+ /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */
203
+ cpuinfo_vendor_motorola = 34,
204
+
205
+ /* Defunct CPU vendors */
206
+
207
+ /**
208
+ * Transmeta Corporation. Vendor of x86 processor microarchitectures.
209
+ *
210
+ * Now defunct. The last processor design was released in 2004.
211
+ * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code.
212
+ */
213
+ cpuinfo_vendor_transmeta = 50,
214
+ /**
215
+ * Cyrix Corporation. Vendor of x86 processor microarchitectures.
216
+ *
217
+ * Now defunct. The last processor design was released in 1996.
218
+ */
219
+ cpuinfo_vendor_cyrix = 51,
220
+ /**
221
+ * Rise Technology. Vendor of x86 processor microarchitectures.
222
+ *
223
+ * Now defunct. The last processor design was released in 1999.
224
+ */
225
+ cpuinfo_vendor_rise = 52,
226
+ /**
227
+ * National Semiconductor. Vendor of x86 processor microarchitectures.
228
+ *
229
+ * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998.
230
+ */
231
+ cpuinfo_vendor_nsc = 53,
232
+ /**
233
+ * Silicon Integrated Systems. Vendor of x86 processor microarchitectures.
234
+ *
235
+ * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001.
236
+ */
237
+ cpuinfo_vendor_sis = 54,
238
+ /**
239
+ * NexGen. Vendor of x86 processor microarchitectures.
240
+ *
241
+ * Now defunct. The last processor design was released in 1994.
242
+ * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations.
243
+ */
244
+ cpuinfo_vendor_nexgen = 55,
245
+ /**
246
+ * United Microelectronics Corporation. Vendor of x86 processor microarchitectures.
247
+ *
248
+ * Ceased x86 in the early 1990s. The last processor design was released in 1991.
249
+ * Designed U5C and U5D processors. Both are 486 level.
250
+ */
251
+ cpuinfo_vendor_umc = 56,
252
+ /**
253
+ * Digital Equipment Corporation. Vendor of ARM processor microarchitecture.
254
+ *
255
+ * Sold its ARM designs in 1997. The last processor design was released in 1997.
256
+ */
257
+ cpuinfo_vendor_dec = 57,
258
+ };
259
+
260
+ /**
261
+ * Processor microarchitecture
262
+ *
263
+ * Processors with different microarchitectures often have different instruction performance characteristics,
264
+ * and may have dramatically different pipeline organization.
265
+ */
266
+ enum cpuinfo_uarch {
267
+ /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from OS */
268
+ cpuinfo_uarch_unknown = 0,
269
+
270
+ /** Pentium and Pentium MMX microarchitecture. */
271
+ cpuinfo_uarch_p5 = 0x00100100,
272
+ /** Intel Quark microarchitecture. */
273
+ cpuinfo_uarch_quark = 0x00100101,
274
+
275
+ /** Pentium Pro, Pentium II, and Pentium III. */
276
+ cpuinfo_uarch_p6 = 0x00100200,
277
+ /** Pentium M. */
278
+ cpuinfo_uarch_dothan = 0x00100201,
279
+ /** Intel Core microarchitecture. */
280
+ cpuinfo_uarch_yonah = 0x00100202,
281
+ /** Intel Core 2 microarchitecture on 65 nm process. */
282
+ cpuinfo_uarch_conroe = 0x00100203,
283
+ /** Intel Core 2 microarchitecture on 45 nm process. */
284
+ cpuinfo_uarch_penryn = 0x00100204,
285
+ /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */
286
+ cpuinfo_uarch_nehalem = 0x00100205,
287
+ /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */
288
+ cpuinfo_uarch_sandy_bridge = 0x00100206,
289
+ /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */
290
+ cpuinfo_uarch_ivy_bridge = 0x00100207,
291
+ /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */
292
+ cpuinfo_uarch_haswell = 0x00100208,
293
+ /** Intel Broadwell microarchitecture. */
294
+ cpuinfo_uarch_broadwell = 0x00100209,
295
+ /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */
296
+ cpuinfo_uarch_sky_lake = 0x0010020A,
297
+ /** DEPRECATED (Intel Kaby Lake microarchitecture). */
298
+ cpuinfo_uarch_kaby_lake = 0x0010020A,
299
+ /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */
300
+ cpuinfo_uarch_palm_cove = 0x0010020B,
301
+ /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */
302
+ cpuinfo_uarch_sunny_cove = 0x0010020C,
303
+
304
+ /** Pentium 4 with Willamette, Northwood, or Foster cores. */
305
+ cpuinfo_uarch_willamette = 0x00100300,
306
+ /** Pentium 4 with Prescott and later cores. */
307
+ cpuinfo_uarch_prescott = 0x00100301,
308
+
309
+ /** Intel Atom on 45 nm process. */
310
+ cpuinfo_uarch_bonnell = 0x00100400,
311
+ /** Intel Atom on 32 nm process. */
312
+ cpuinfo_uarch_saltwell = 0x00100401,
313
+ /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */
314
+ cpuinfo_uarch_silvermont = 0x00100402,
315
+ /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */
316
+ cpuinfo_uarch_airmont = 0x00100403,
317
+ /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */
318
+ cpuinfo_uarch_goldmont = 0x00100404,
319
+ /** Intel Goldmont Plus microarchitecture (Gemini Lake). */
320
+ cpuinfo_uarch_goldmont_plus = 0x00100405,
321
+
322
+ /** Intel Knights Ferry HPC boards. */
323
+ cpuinfo_uarch_knights_ferry = 0x00100500,
324
+ /** Intel Knights Corner HPC boards (aka Xeon Phi). */
325
+ cpuinfo_uarch_knights_corner = 0x00100501,
326
+ /** Intel Knights Landing microarchitecture (second-gen MIC). */
327
+ cpuinfo_uarch_knights_landing = 0x00100502,
328
+ /** Intel Knights Hill microarchitecture (third-gen MIC). */
329
+ cpuinfo_uarch_knights_hill = 0x00100503,
330
+ /** Intel Knights Mill Xeon Phi. */
331
+ cpuinfo_uarch_knights_mill = 0x00100504,
332
+
333
+ /** Intel/Marvell XScale series. */
334
+ cpuinfo_uarch_xscale = 0x00100600,
335
+
336
+ /** AMD K5. */
337
+ cpuinfo_uarch_k5 = 0x00200100,
338
+ /** AMD K6 and alike. */
339
+ cpuinfo_uarch_k6 = 0x00200101,
340
+ /** AMD Athlon and Duron. */
341
+ cpuinfo_uarch_k7 = 0x00200102,
342
+ /** AMD Athlon 64, Opteron 64. */
343
+ cpuinfo_uarch_k8 = 0x00200103,
344
+ /** AMD Family 10h (Barcelona, Istanbul, Magny-Cours). */
345
+ cpuinfo_uarch_k10 = 0x00200104,
346
+ /**
347
+ * AMD Bulldozer microarchitecture
348
+ * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs.
349
+ */
350
+ cpuinfo_uarch_bulldozer = 0x00200105,
351
+ /**
352
+ * AMD Piledriver microarchitecture
353
+ * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs.
354
+ */
355
+ cpuinfo_uarch_piledriver = 0x00200106,
356
+ /** AMD Steamroller microarchitecture (Kaveri APUs). */
357
+ cpuinfo_uarch_steamroller = 0x00200107,
358
+ /** AMD Excavator microarchitecture (Carrizo APUs). */
359
+ cpuinfo_uarch_excavator = 0x00200108,
360
+ /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */
361
+ cpuinfo_uarch_zen = 0x00200109,
362
+ /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */
363
+ cpuinfo_uarch_zen2 = 0x0020010A,
364
+ /** AMD Zen 3 microarchitecture. */
365
+ cpuinfo_uarch_zen3 = 0x0020010B,
366
+ /** AMD Zen 4 microarchitecture. */
367
+ cpuinfo_uarch_zen4 = 0x0020010C,
368
+
369
+ /** NSC Geode and AMD Geode GX and LX. */
370
+ cpuinfo_uarch_geode = 0x00200200,
371
+ /** AMD Bobcat mobile microarchitecture. */
372
+ cpuinfo_uarch_bobcat = 0x00200201,
373
+ /** AMD Jaguar mobile microarchitecture. */
374
+ cpuinfo_uarch_jaguar = 0x00200202,
375
+ /** AMD Puma mobile microarchitecture. */
376
+ cpuinfo_uarch_puma = 0x00200203,
377
+
378
+ /** ARM7 series. */
379
+ cpuinfo_uarch_arm7 = 0x00300100,
380
+ /** ARM9 series. */
381
+ cpuinfo_uarch_arm9 = 0x00300101,
382
+ /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */
383
+ cpuinfo_uarch_arm11 = 0x00300102,
384
+
385
+ /** ARM Cortex-A5. */
386
+ cpuinfo_uarch_cortex_a5 = 0x00300205,
387
+ /** ARM Cortex-A7. */
388
+ cpuinfo_uarch_cortex_a7 = 0x00300207,
389
+ /** ARM Cortex-A8. */
390
+ cpuinfo_uarch_cortex_a8 = 0x00300208,
391
+ /** ARM Cortex-A9. */
392
+ cpuinfo_uarch_cortex_a9 = 0x00300209,
393
+ /** ARM Cortex-A12. */
394
+ cpuinfo_uarch_cortex_a12 = 0x00300212,
395
+ /** ARM Cortex-A15. */
396
+ cpuinfo_uarch_cortex_a15 = 0x00300215,
397
+ /** ARM Cortex-A17. */
398
+ cpuinfo_uarch_cortex_a17 = 0x00300217,
399
+
400
+ /** ARM Cortex-A32. */
401
+ cpuinfo_uarch_cortex_a32 = 0x00300332,
402
+ /** ARM Cortex-A35. */
403
+ cpuinfo_uarch_cortex_a35 = 0x00300335,
404
+ /** ARM Cortex-A53. */
405
+ cpuinfo_uarch_cortex_a53 = 0x00300353,
406
+ /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */
407
+ cpuinfo_uarch_cortex_a55r0 = 0x00300354,
408
+ /** ARM Cortex-A55. */
409
+ cpuinfo_uarch_cortex_a55 = 0x00300355,
410
+ /** ARM Cortex-A57. */
411
+ cpuinfo_uarch_cortex_a57 = 0x00300357,
412
+ /** ARM Cortex-A65. */
413
+ cpuinfo_uarch_cortex_a65 = 0x00300365,
414
+ /** ARM Cortex-A72. */
415
+ cpuinfo_uarch_cortex_a72 = 0x00300372,
416
+ /** ARM Cortex-A73. */
417
+ cpuinfo_uarch_cortex_a73 = 0x00300373,
418
+ /** ARM Cortex-A75. */
419
+ cpuinfo_uarch_cortex_a75 = 0x00300375,
420
+ /** ARM Cortex-A76. */
421
+ cpuinfo_uarch_cortex_a76 = 0x00300376,
422
+ /** ARM Cortex-A77. */
423
+ cpuinfo_uarch_cortex_a77 = 0x00300377,
424
+ /** ARM Cortex-A78. */
425
+ cpuinfo_uarch_cortex_a78 = 0x00300378,
426
+
427
+ /** ARM Neoverse N1. */
428
+ cpuinfo_uarch_neoverse_n1 = 0x00300400,
429
+ /** ARM Neoverse E1. */
430
+ cpuinfo_uarch_neoverse_e1 = 0x00300401,
431
+ /** ARM Neoverse V1. */
432
+ cpuinfo_uarch_neoverse_v1 = 0x00300402,
433
+ /** ARM Neoverse N2. */
434
+ cpuinfo_uarch_neoverse_n2 = 0x00300403,
435
+ /** ARM Neoverse V2. */
436
+ cpuinfo_uarch_neoverse_v2 = 0x00300404,
437
+
438
+ /** ARM Cortex-X1. */
439
+ cpuinfo_uarch_cortex_x1 = 0x00300501,
440
+ /** ARM Cortex-X2. */
441
+ cpuinfo_uarch_cortex_x2 = 0x00300502,
442
+ /** ARM Cortex-X3. */
443
+ cpuinfo_uarch_cortex_x3 = 0x00300503,
444
+
445
+ /** ARM Cortex-A510. */
446
+ cpuinfo_uarch_cortex_a510 = 0x00300551,
447
+ /** ARM Cortex-A710. */
448
+ cpuinfo_uarch_cortex_a710 = 0x00300571,
449
+ /** ARM Cortex-A715. */
450
+ cpuinfo_uarch_cortex_a715 = 0x00300572,
451
+
452
+ /** Qualcomm Scorpion. */
453
+ cpuinfo_uarch_scorpion = 0x00400100,
454
+ /** Qualcomm Krait. */
455
+ cpuinfo_uarch_krait = 0x00400101,
456
+ /** Qualcomm Kryo. */
457
+ cpuinfo_uarch_kryo = 0x00400102,
458
+ /** Qualcomm Falkor. */
459
+ cpuinfo_uarch_falkor = 0x00400103,
460
+ /** Qualcomm Saphira. */
461
+ cpuinfo_uarch_saphira = 0x00400104,
462
+
463
+ /** Nvidia Denver. */
464
+ cpuinfo_uarch_denver = 0x00500100,
465
+ /** Nvidia Denver 2. */
466
+ cpuinfo_uarch_denver2 = 0x00500101,
467
+ /** Nvidia Carmel. */
468
+ cpuinfo_uarch_carmel = 0x00500102,
469
+
470
+ /** Samsung Exynos M1 (Exynos 8890 big cores). */
471
+ cpuinfo_uarch_exynos_m1 = 0x00600100,
472
+ /** Samsung Exynos M2 (Exynos 8895 big cores). */
473
+ cpuinfo_uarch_exynos_m2 = 0x00600101,
474
+ /** Samsung Exynos M3 (Exynos 9810 big cores). */
475
+ cpuinfo_uarch_exynos_m3 = 0x00600102,
476
+ /** Samsung Exynos M4 (Exynos 9820 big cores). */
477
+ cpuinfo_uarch_exynos_m4 = 0x00600103,
478
+ /** Samsung Exynos M5 (Exynos 9830 big cores). */
479
+ cpuinfo_uarch_exynos_m5 = 0x00600104,
480
+
481
+ /* Deprecated synonym for Cortex-A76 */
482
+ cpuinfo_uarch_cortex_a76ae = 0x00300376,
483
+ /* Deprecated names for Exynos. */
484
+ cpuinfo_uarch_mongoose_m1 = 0x00600100,
485
+ cpuinfo_uarch_mongoose_m2 = 0x00600101,
486
+ cpuinfo_uarch_meerkat_m3 = 0x00600102,
487
+ cpuinfo_uarch_meerkat_m4 = 0x00600103,
488
+
489
+ /** Apple A6 and A6X processors. */
490
+ cpuinfo_uarch_swift = 0x00700100,
491
+ /** Apple A7 processor. */
492
+ cpuinfo_uarch_cyclone = 0x00700101,
493
+ /** Apple A8 and A8X processor. */
494
+ cpuinfo_uarch_typhoon = 0x00700102,
495
+ /** Apple A9 and A9X processor. */
496
+ cpuinfo_uarch_twister = 0x00700103,
497
+ /** Apple A10 and A10X processor. */
498
+ cpuinfo_uarch_hurricane = 0x00700104,
499
+ /** Apple A11 processor (big cores). */
500
+ cpuinfo_uarch_monsoon = 0x00700105,
501
+ /** Apple A11 processor (little cores). */
502
+ cpuinfo_uarch_mistral = 0x00700106,
503
+ /** Apple A12 processor (big cores). */
504
+ cpuinfo_uarch_vortex = 0x00700107,
505
+ /** Apple A12 processor (little cores). */
506
+ cpuinfo_uarch_tempest = 0x00700108,
507
+ /** Apple A13 processor (big cores). */
508
+ cpuinfo_uarch_lightning = 0x00700109,
509
+ /** Apple A13 processor (little cores). */
510
+ cpuinfo_uarch_thunder = 0x0070010A,
511
+ /** Apple A14 / M1 processor (big cores). */
512
+ cpuinfo_uarch_firestorm = 0x0070010B,
513
+ /** Apple A14 / M1 processor (little cores). */
514
+ cpuinfo_uarch_icestorm = 0x0070010C,
515
+ /** Apple A15 / M2 processor (big cores). */
516
+ cpuinfo_uarch_avalanche = 0x0070010D,
517
+ /** Apple A15 / M2 processor (little cores). */
518
+ cpuinfo_uarch_blizzard = 0x0070010E,
519
+
520
+ /** Cavium ThunderX. */
521
+ cpuinfo_uarch_thunderx = 0x00800100,
522
+ /** Cavium ThunderX2 (originally Broadcom Vulcan). */
523
+ cpuinfo_uarch_thunderx2 = 0x00800200,
524
+
525
+ /** Marvell PJ4. */
526
+ cpuinfo_uarch_pj4 = 0x00900100,
527
+
528
+ /** Broadcom Brahma B15. */
529
+ cpuinfo_uarch_brahma_b15 = 0x00A00100,
530
+ /** Broadcom Brahma B53. */
531
+ cpuinfo_uarch_brahma_b53 = 0x00A00101,
532
+
533
+ /** Applied Micro X-Gene. */
534
+ cpuinfo_uarch_xgene = 0x00B00100,
535
+
536
+ /* Hygon Dhyana (a modification of AMD Zen for Chinese market). */
537
+ cpuinfo_uarch_dhyana = 0x01000100,
538
+
539
+ /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */
540
+ cpuinfo_uarch_taishan_v110 = 0x00C00100,
541
+ };
542
+
543
+ struct cpuinfo_processor {
544
+ /** SMT (hyperthread) ID within a core */
545
+ uint32_t smt_id;
546
+ /** Core containing this logical processor */
547
+ const struct cpuinfo_core* core;
548
+ /** Cluster of cores containing this logical processor */
549
+ const struct cpuinfo_cluster* cluster;
550
+ /** Physical package containing this logical processor */
551
+ const struct cpuinfo_package* package;
552
+ #if defined(__linux__)
553
+ /**
554
+ * Linux-specific ID for the logical processor:
555
+ * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu<linux_id>/
556
+ * - Bit <linux_id> in the cpu_set_t identifies this logical processor
557
+ */
558
+ int linux_id;
559
+ #endif
560
+ #if defined(_WIN32) || defined(__CYGWIN__)
561
+ /** Windows-specific ID for the group containing the logical processor. */
562
+ uint16_t windows_group_id;
563
+ /**
564
+ * Windows-specific ID of the logical processor within its group:
565
+ * - Bit <windows_processor_id> in the KAFFINITY mask identifies this logical processor within its group.
566
+ */
567
+ uint16_t windows_processor_id;
568
+ #endif
569
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
570
+ /** APIC ID (unique x86-specific ID of the logical processor) */
571
+ uint32_t apic_id;
572
+ #endif
573
+ struct {
574
+ /** Level 1 instruction cache */
575
+ const struct cpuinfo_cache* l1i;
576
+ /** Level 1 data cache */
577
+ const struct cpuinfo_cache* l1d;
578
+ /** Level 2 unified or data cache */
579
+ const struct cpuinfo_cache* l2;
580
+ /** Level 3 unified or data cache */
581
+ const struct cpuinfo_cache* l3;
582
+ /** Level 4 unified or data cache */
583
+ const struct cpuinfo_cache* l4;
584
+ } cache;
585
+ };
586
+
587
+ struct cpuinfo_core {
588
+ /** Index of the first logical processor on this core. */
589
+ uint32_t processor_start;
590
+ /** Number of logical processors on this core */
591
+ uint32_t processor_count;
592
+ /** Core ID within a package */
593
+ uint32_t core_id;
594
+ /** Cluster containing this core */
595
+ const struct cpuinfo_cluster* cluster;
596
+ /** Physical package containing this core. */
597
+ const struct cpuinfo_package* package;
598
+ /** Vendor of the CPU microarchitecture for this core */
599
+ enum cpuinfo_vendor vendor;
600
+ /** CPU microarchitecture for this core */
601
+ enum cpuinfo_uarch uarch;
602
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
603
+ /** Value of CPUID leaf 1 EAX register for this core */
604
+ uint32_t cpuid;
605
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
606
+ /** Value of Main ID Register (MIDR) for this core */
607
+ uint32_t midr;
608
+ #endif
609
+ /** Clock rate (non-Turbo) of the core, in Hz */
610
+ uint64_t frequency;
611
+ };
612
+
613
+ struct cpuinfo_cluster {
614
+ /** Index of the first logical processor in the cluster */
615
+ uint32_t processor_start;
616
+ /** Number of logical processors in the cluster */
617
+ uint32_t processor_count;
618
+ /** Index of the first core in the cluster */
619
+ uint32_t core_start;
620
+ /** Number of cores on the cluster */
621
+ uint32_t core_count;
622
+ /** Cluster ID within a package */
623
+ uint32_t cluster_id;
624
+ /** Physical package containing the cluster */
625
+ const struct cpuinfo_package* package;
626
+ /** CPU microarchitecture vendor of the cores in the cluster */
627
+ enum cpuinfo_vendor vendor;
628
+ /** CPU microarchitecture of the cores in the cluster */
629
+ enum cpuinfo_uarch uarch;
630
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
631
+ /** Value of CPUID leaf 1 EAX register of the cores in the cluster */
632
+ uint32_t cpuid;
633
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
634
+ /** Value of Main ID Register (MIDR) of the cores in the cluster */
635
+ uint32_t midr;
636
+ #endif
637
+ /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
638
+ uint64_t frequency;
639
+ };
640
+
641
+ #define CPUINFO_PACKAGE_NAME_MAX 48
642
+
643
+ struct cpuinfo_package {
644
+ /** SoC or processor chip model name */
645
+ char name[CPUINFO_PACKAGE_NAME_MAX];
646
+ /** Index of the first logical processor on this physical package */
647
+ uint32_t processor_start;
648
+ /** Number of logical processors on this physical package */
649
+ uint32_t processor_count;
650
+ /** Index of the first core on this physical package */
651
+ uint32_t core_start;
652
+ /** Number of cores on this physical package */
653
+ uint32_t core_count;
654
+ /** Index of the first cluster of cores on this physical package */
655
+ uint32_t cluster_start;
656
+ /** Number of clusters of cores on this physical package */
657
+ uint32_t cluster_count;
658
+ };
659
+
660
+ struct cpuinfo_uarch_info {
661
+ /** Type of CPU microarchitecture */
662
+ enum cpuinfo_uarch uarch;
663
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
664
+ /** Value of CPUID leaf 1 EAX register for the microarchitecture */
665
+ uint32_t cpuid;
666
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
667
+ /** Value of Main ID Register (MIDR) for the microarchitecture */
668
+ uint32_t midr;
669
+ #endif
670
+ /** Number of logical processors with the microarchitecture */
671
+ uint32_t processor_count;
672
+ /** Number of cores with the microarchitecture */
673
+ uint32_t core_count;
674
+ };
675
+
676
+ #ifdef __cplusplus
677
+ extern "C" {
678
+ #endif
679
+
680
+ bool CPUINFO_ABI cpuinfo_initialize(void);
681
+
682
+ void CPUINFO_ABI cpuinfo_deinitialize(void);
683
+
684
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
685
+ /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. */
686
+ struct cpuinfo_x86_isa {
687
+ #if CPUINFO_ARCH_X86
688
+ bool rdtsc;
689
+ #endif
690
+ bool rdtscp;
691
+ bool rdpid;
692
+ bool sysenter;
693
+ #if CPUINFO_ARCH_X86
694
+ bool syscall;
695
+ #endif
696
+ bool msr;
697
+ bool clzero;
698
+ bool clflush;
699
+ bool clflushopt;
700
+ bool mwait;
701
+ bool mwaitx;
702
+ #if CPUINFO_ARCH_X86
703
+ bool emmx;
704
+ #endif
705
+ bool fxsave;
706
+ bool xsave;
707
+ #if CPUINFO_ARCH_X86
708
+ bool fpu;
709
+ bool mmx;
710
+ bool mmx_plus;
711
+ #endif
712
+ bool three_d_now;
713
+ bool three_d_now_plus;
714
+ #if CPUINFO_ARCH_X86
715
+ bool three_d_now_geode;
716
+ #endif
717
+ bool prefetch;
718
+ bool prefetchw;
719
+ bool prefetchwt1;
720
+ #if CPUINFO_ARCH_X86
721
+ bool daz;
722
+ bool sse;
723
+ bool sse2;
724
+ #endif
725
+ bool sse3;
726
+ bool ssse3;
727
+ bool sse4_1;
728
+ bool sse4_2;
729
+ bool sse4a;
730
+ bool misaligned_sse;
731
+ bool avx;
732
+ bool avxvnni;
733
+ bool fma3;
734
+ bool fma4;
735
+ bool xop;
736
+ bool f16c;
737
+ bool avx2;
738
+ bool avx512f;
739
+ bool avx512pf;
740
+ bool avx512er;
741
+ bool avx512cd;
742
+ bool avx512dq;
743
+ bool avx512bw;
744
+ bool avx512vl;
745
+ bool avx512ifma;
746
+ bool avx512vbmi;
747
+ bool avx512vbmi2;
748
+ bool avx512bitalg;
749
+ bool avx512vpopcntdq;
750
+ bool avx512vnni;
751
+ bool avx512bf16;
752
+ bool avx512fp16;
753
+ bool avx512vp2intersect;
754
+ bool avx512_4vnniw;
755
+ bool avx512_4fmaps;
756
+ bool hle;
757
+ bool rtm;
758
+ bool xtest;
759
+ bool mpx;
760
+ #if CPUINFO_ARCH_X86
761
+ bool cmov;
762
+ bool cmpxchg8b;
763
+ #endif
764
+ bool cmpxchg16b;
765
+ bool clwb;
766
+ bool movbe;
767
+ #if CPUINFO_ARCH_X86_64
768
+ bool lahf_sahf;
769
+ #endif
770
+ bool fs_gs_base;
771
+ bool lzcnt;
772
+ bool popcnt;
773
+ bool tbm;
774
+ bool bmi;
775
+ bool bmi2;
776
+ bool adx;
777
+ bool aes;
778
+ bool vaes;
779
+ bool pclmulqdq;
780
+ bool vpclmulqdq;
781
+ bool gfni;
782
+ bool rdrand;
783
+ bool rdseed;
784
+ bool sha;
785
+ bool rng;
786
+ bool ace;
787
+ bool ace2;
788
+ bool phe;
789
+ bool pmm;
790
+ bool lwp;
791
+ };
792
+
793
+ extern struct cpuinfo_x86_isa cpuinfo_isa;
794
+ #endif
795
+
796
+ static inline bool cpuinfo_has_x86_rdtsc(void) {
797
+ #if CPUINFO_ARCH_X86_64
798
+ return true;
799
+ #elif CPUINFO_ARCH_X86
800
+ #if defined(__ANDROID__)
801
+ return true;
802
+ #else
803
+ return cpuinfo_isa.rdtsc;
804
+ #endif
805
+ #else
806
+ return false;
807
+ #endif
808
+ }
809
+
810
+ static inline bool cpuinfo_has_x86_rdtscp(void) {
811
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
812
+ return cpuinfo_isa.rdtscp;
813
+ #else
814
+ return false;
815
+ #endif
816
+ }
817
+
818
+ static inline bool cpuinfo_has_x86_rdpid(void) {
819
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
820
+ return cpuinfo_isa.rdpid;
821
+ #else
822
+ return false;
823
+ #endif
824
+ }
825
+
826
+ static inline bool cpuinfo_has_x86_clzero(void) {
827
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
828
+ return cpuinfo_isa.clzero;
829
+ #else
830
+ return false;
831
+ #endif
832
+ }
833
+
834
+ static inline bool cpuinfo_has_x86_mwait(void) {
835
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
836
+ return cpuinfo_isa.mwait;
837
+ #else
838
+ return false;
839
+ #endif
840
+ }
841
+
842
+ static inline bool cpuinfo_has_x86_mwaitx(void) {
843
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
844
+ return cpuinfo_isa.mwaitx;
845
+ #else
846
+ return false;
847
+ #endif
848
+ }
849
+
850
+ static inline bool cpuinfo_has_x86_fxsave(void) {
851
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
852
+ return cpuinfo_isa.fxsave;
853
+ #else
854
+ return false;
855
+ #endif
856
+ }
857
+
858
+ static inline bool cpuinfo_has_x86_xsave(void) {
859
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
860
+ return cpuinfo_isa.xsave;
861
+ #else
862
+ return false;
863
+ #endif
864
+ }
865
+
866
+ static inline bool cpuinfo_has_x86_fpu(void) {
867
+ #if CPUINFO_ARCH_X86_64
868
+ return true;
869
+ #elif CPUINFO_ARCH_X86
870
+ #if defined(__ANDROID__)
871
+ return true;
872
+ #else
873
+ return cpuinfo_isa.fpu;
874
+ #endif
875
+ #else
876
+ return false;
877
+ #endif
878
+ }
879
+
880
+ static inline bool cpuinfo_has_x86_mmx(void) {
881
+ #if CPUINFO_ARCH_X86_64
882
+ return true;
883
+ #elif CPUINFO_ARCH_X86
884
+ #if defined(__ANDROID__)
885
+ return true;
886
+ #else
887
+ return cpuinfo_isa.mmx;
888
+ #endif
889
+ #else
890
+ return false;
891
+ #endif
892
+ }
893
+
894
+ static inline bool cpuinfo_has_x86_mmx_plus(void) {
895
+ #if CPUINFO_ARCH_X86_64
896
+ return true;
897
+ #elif CPUINFO_ARCH_X86
898
+ #if defined(__ANDROID__)
899
+ return true;
900
+ #else
901
+ return cpuinfo_isa.mmx_plus;
902
+ #endif
903
+ #else
904
+ return false;
905
+ #endif
906
+ }
907
+
908
+ static inline bool cpuinfo_has_x86_3dnow(void) {
909
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
910
+ return cpuinfo_isa.three_d_now;
911
+ #else
912
+ return false;
913
+ #endif
914
+ }
915
+
916
+ static inline bool cpuinfo_has_x86_3dnow_plus(void) {
917
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
918
+ return cpuinfo_isa.three_d_now_plus;
919
+ #else
920
+ return false;
921
+ #endif
922
+ }
923
+
924
+ static inline bool cpuinfo_has_x86_3dnow_geode(void) {
925
+ #if CPUINFO_ARCH_X86_64
926
+ return false;
927
+ #elif CPUINFO_ARCH_X86
928
+ #if defined(__ANDROID__)
929
+ return false;
930
+ #else
931
+ return cpuinfo_isa.three_d_now_geode;
932
+ #endif
933
+ #else
934
+ return false;
935
+ #endif
936
+ }
937
+
938
+ static inline bool cpuinfo_has_x86_prefetch(void) {
939
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
940
+ return cpuinfo_isa.prefetch;
941
+ #else
942
+ return false;
943
+ #endif
944
+ }
945
+
946
+ static inline bool cpuinfo_has_x86_prefetchw(void) {
947
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
948
+ return cpuinfo_isa.prefetchw;
949
+ #else
950
+ return false;
951
+ #endif
952
+ }
953
+
954
+ static inline bool cpuinfo_has_x86_prefetchwt1(void) {
955
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
956
+ return cpuinfo_isa.prefetchwt1;
957
+ #else
958
+ return false;
959
+ #endif
960
+ }
961
+
962
+ static inline bool cpuinfo_has_x86_daz(void) {
963
+ #if CPUINFO_ARCH_X86_64
964
+ return true;
965
+ #elif CPUINFO_ARCH_X86
966
+ #if defined(__ANDROID__)
967
+ return true;
968
+ #else
969
+ return cpuinfo_isa.daz;
970
+ #endif
971
+ #else
972
+ return false;
973
+ #endif
974
+ }
975
+
976
+ static inline bool cpuinfo_has_x86_sse(void) {
977
+ #if CPUINFO_ARCH_X86_64
978
+ return true;
979
+ #elif CPUINFO_ARCH_X86
980
+ #if defined(__ANDROID__)
981
+ return true;
982
+ #else
983
+ return cpuinfo_isa.sse;
984
+ #endif
985
+ #else
986
+ return false;
987
+ #endif
988
+ }
989
+
990
+ static inline bool cpuinfo_has_x86_sse2(void) {
991
+ #if CPUINFO_ARCH_X86_64
992
+ return true;
993
+ #elif CPUINFO_ARCH_X86
994
+ #if defined(__ANDROID__)
995
+ return true;
996
+ #else
997
+ return cpuinfo_isa.sse2;
998
+ #endif
999
+ #else
1000
+ return false;
1001
+ #endif
1002
+ }
1003
+
1004
+ static inline bool cpuinfo_has_x86_sse3(void) {
1005
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1006
+ #if defined(__ANDROID__)
1007
+ return true;
1008
+ #else
1009
+ return cpuinfo_isa.sse3;
1010
+ #endif
1011
+ #else
1012
+ return false;
1013
+ #endif
1014
+ }
1015
+
1016
+ static inline bool cpuinfo_has_x86_ssse3(void) {
1017
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1018
+ #if defined(__ANDROID__)
1019
+ return true;
1020
+ #else
1021
+ return cpuinfo_isa.ssse3;
1022
+ #endif
1023
+ #else
1024
+ return false;
1025
+ #endif
1026
+ }
1027
+
1028
+ static inline bool cpuinfo_has_x86_sse4_1(void) {
1029
+ #if CPUINFO_ARCH_X86_64
1030
+ #if defined(__ANDROID__)
1031
+ return true;
1032
+ #else
1033
+ return cpuinfo_isa.sse4_1;
1034
+ #endif
1035
+ #elif CPUINFO_ARCH_X86
1036
+ return cpuinfo_isa.sse4_1;
1037
+ #else
1038
+ return false;
1039
+ #endif
1040
+ }
1041
+
1042
+ static inline bool cpuinfo_has_x86_sse4_2(void) {
1043
+ #if CPUINFO_ARCH_X86_64
1044
+ #if defined(__ANDROID__)
1045
+ return true;
1046
+ #else
1047
+ return cpuinfo_isa.sse4_2;
1048
+ #endif
1049
+ #elif CPUINFO_ARCH_X86
1050
+ return cpuinfo_isa.sse4_2;
1051
+ #else
1052
+ return false;
1053
+ #endif
1054
+ }
1055
+
1056
+ static inline bool cpuinfo_has_x86_sse4a(void) {
1057
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1058
+ return cpuinfo_isa.sse4a;
1059
+ #else
1060
+ return false;
1061
+ #endif
1062
+ }
1063
+
1064
+ static inline bool cpuinfo_has_x86_misaligned_sse(void) {
1065
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1066
+ return cpuinfo_isa.misaligned_sse;
1067
+ #else
1068
+ return false;
1069
+ #endif
1070
+ }
1071
+
1072
+ static inline bool cpuinfo_has_x86_avx(void) {
1073
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1074
+ return cpuinfo_isa.avx;
1075
+ #else
1076
+ return false;
1077
+ #endif
1078
+ }
1079
+
1080
+ static inline bool cpuinfo_has_x86_avxvnni(void) {
1081
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1082
+ return cpuinfo_isa.avxvnni;
1083
+ #else
1084
+ return false;
1085
+ #endif
1086
+ }
1087
+
1088
+ static inline bool cpuinfo_has_x86_fma3(void) {
1089
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1090
+ return cpuinfo_isa.fma3;
1091
+ #else
1092
+ return false;
1093
+ #endif
1094
+ }
1095
+
1096
+ static inline bool cpuinfo_has_x86_fma4(void) {
1097
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1098
+ return cpuinfo_isa.fma4;
1099
+ #else
1100
+ return false;
1101
+ #endif
1102
+ }
1103
+
1104
+ static inline bool cpuinfo_has_x86_xop(void) {
1105
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1106
+ return cpuinfo_isa.xop;
1107
+ #else
1108
+ return false;
1109
+ #endif
1110
+ }
1111
+
1112
+ static inline bool cpuinfo_has_x86_f16c(void) {
1113
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1114
+ return cpuinfo_isa.f16c;
1115
+ #else
1116
+ return false;
1117
+ #endif
1118
+ }
1119
+
1120
+ static inline bool cpuinfo_has_x86_avx2(void) {
1121
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1122
+ return cpuinfo_isa.avx2;
1123
+ #else
1124
+ return false;
1125
+ #endif
1126
+ }
1127
+
1128
+ static inline bool cpuinfo_has_x86_avx512f(void) {
1129
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1130
+ return cpuinfo_isa.avx512f;
1131
+ #else
1132
+ return false;
1133
+ #endif
1134
+ }
1135
+
1136
+ static inline bool cpuinfo_has_x86_avx512pf(void) {
1137
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1138
+ return cpuinfo_isa.avx512pf;
1139
+ #else
1140
+ return false;
1141
+ #endif
1142
+ }
1143
+
1144
+ static inline bool cpuinfo_has_x86_avx512er(void) {
1145
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1146
+ return cpuinfo_isa.avx512er;
1147
+ #else
1148
+ return false;
1149
+ #endif
1150
+ }
1151
+
1152
+ static inline bool cpuinfo_has_x86_avx512cd(void) {
1153
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1154
+ return cpuinfo_isa.avx512cd;
1155
+ #else
1156
+ return false;
1157
+ #endif
1158
+ }
1159
+
1160
+ static inline bool cpuinfo_has_x86_avx512dq(void) {
1161
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1162
+ return cpuinfo_isa.avx512dq;
1163
+ #else
1164
+ return false;
1165
+ #endif
1166
+ }
1167
+
1168
+ static inline bool cpuinfo_has_x86_avx512bw(void) {
1169
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1170
+ return cpuinfo_isa.avx512bw;
1171
+ #else
1172
+ return false;
1173
+ #endif
1174
+ }
1175
+
1176
+ static inline bool cpuinfo_has_x86_avx512vl(void) {
1177
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1178
+ return cpuinfo_isa.avx512vl;
1179
+ #else
1180
+ return false;
1181
+ #endif
1182
+ }
1183
+
1184
+ static inline bool cpuinfo_has_x86_avx512ifma(void) {
1185
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1186
+ return cpuinfo_isa.avx512ifma;
1187
+ #else
1188
+ return false;
1189
+ #endif
1190
+ }
1191
+
1192
+ static inline bool cpuinfo_has_x86_avx512vbmi(void) {
1193
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1194
+ return cpuinfo_isa.avx512vbmi;
1195
+ #else
1196
+ return false;
1197
+ #endif
1198
+ }
1199
+
1200
+ static inline bool cpuinfo_has_x86_avx512vbmi2(void) {
1201
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1202
+ return cpuinfo_isa.avx512vbmi2;
1203
+ #else
1204
+ return false;
1205
+ #endif
1206
+ }
1207
+
1208
+ static inline bool cpuinfo_has_x86_avx512bitalg(void) {
1209
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1210
+ return cpuinfo_isa.avx512bitalg;
1211
+ #else
1212
+ return false;
1213
+ #endif
1214
+ }
1215
+
1216
+ static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) {
1217
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1218
+ return cpuinfo_isa.avx512vpopcntdq;
1219
+ #else
1220
+ return false;
1221
+ #endif
1222
+ }
1223
+
1224
+ static inline bool cpuinfo_has_x86_avx512vnni(void) {
1225
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1226
+ return cpuinfo_isa.avx512vnni;
1227
+ #else
1228
+ return false;
1229
+ #endif
1230
+ }
1231
+
1232
+ static inline bool cpuinfo_has_x86_avx512bf16(void) {
1233
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1234
+ return cpuinfo_isa.avx512bf16;
1235
+ #else
1236
+ return false;
1237
+ #endif
1238
+ }
1239
+
1240
+ static inline bool cpuinfo_has_x86_avx512fp16(void) {
1241
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1242
+ return cpuinfo_isa.avx512fp16;
1243
+ #else
1244
+ return false;
1245
+ #endif
1246
+ }
1247
+
1248
+ static inline bool cpuinfo_has_x86_avx512vp2intersect(void) {
1249
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1250
+ return cpuinfo_isa.avx512vp2intersect;
1251
+ #else
1252
+ return false;
1253
+ #endif
1254
+ }
1255
+
1256
+ static inline bool cpuinfo_has_x86_avx512_4vnniw(void) {
1257
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1258
+ return cpuinfo_isa.avx512_4vnniw;
1259
+ #else
1260
+ return false;
1261
+ #endif
1262
+ }
1263
+
1264
+ static inline bool cpuinfo_has_x86_avx512_4fmaps(void) {
1265
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1266
+ return cpuinfo_isa.avx512_4fmaps;
1267
+ #else
1268
+ return false;
1269
+ #endif
1270
+ }
1271
+
1272
+ static inline bool cpuinfo_has_x86_hle(void) {
1273
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1274
+ return cpuinfo_isa.hle;
1275
+ #else
1276
+ return false;
1277
+ #endif
1278
+ }
1279
+
1280
+ static inline bool cpuinfo_has_x86_rtm(void) {
1281
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1282
+ return cpuinfo_isa.rtm;
1283
+ #else
1284
+ return false;
1285
+ #endif
1286
+ }
1287
+
1288
+ static inline bool cpuinfo_has_x86_xtest(void) {
1289
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1290
+ return cpuinfo_isa.xtest;
1291
+ #else
1292
+ return false;
1293
+ #endif
1294
+ }
1295
+
1296
+ static inline bool cpuinfo_has_x86_mpx(void) {
1297
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1298
+ return cpuinfo_isa.mpx;
1299
+ #else
1300
+ return false;
1301
+ #endif
1302
+ }
1303
+
1304
+ static inline bool cpuinfo_has_x86_cmov(void) {
1305
+ #if CPUINFO_ARCH_X86_64
1306
+ return true;
1307
+ #elif CPUINFO_ARCH_X86
1308
+ return cpuinfo_isa.cmov;
1309
+ #else
1310
+ return false;
1311
+ #endif
1312
+ }
1313
+
1314
+ static inline bool cpuinfo_has_x86_cmpxchg8b(void) {
1315
+ #if CPUINFO_ARCH_X86_64
1316
+ return true;
1317
+ #elif CPUINFO_ARCH_X86
1318
+ return cpuinfo_isa.cmpxchg8b;
1319
+ #else
1320
+ return false;
1321
+ #endif
1322
+ }
1323
+
1324
+ static inline bool cpuinfo_has_x86_cmpxchg16b(void) {
1325
+ #if CPUINFO_ARCH_X86_64
1326
+ return cpuinfo_isa.cmpxchg16b;
1327
+ #else
1328
+ return false;
1329
+ #endif
1330
+ }
1331
+
1332
+ static inline bool cpuinfo_has_x86_clwb(void) {
1333
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1334
+ return cpuinfo_isa.clwb;
1335
+ #else
1336
+ return false;
1337
+ #endif
1338
+ }
1339
+
1340
+ static inline bool cpuinfo_has_x86_movbe(void) {
1341
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1342
+ return cpuinfo_isa.movbe;
1343
+ #else
1344
+ return false;
1345
+ #endif
1346
+ }
1347
+
1348
+ static inline bool cpuinfo_has_x86_lahf_sahf(void) {
1349
+ #if CPUINFO_ARCH_X86
1350
+ return true;
1351
+ #elif CPUINFO_ARCH_X86_64
1352
+ return cpuinfo_isa.lahf_sahf;
1353
+ #else
1354
+ return false;
1355
+ #endif
1356
+ }
1357
+
1358
+ static inline bool cpuinfo_has_x86_lzcnt(void) {
1359
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1360
+ return cpuinfo_isa.lzcnt;
1361
+ #else
1362
+ return false;
1363
+ #endif
1364
+ }
1365
+
1366
+ static inline bool cpuinfo_has_x86_popcnt(void) {
1367
+ #if CPUINFO_ARCH_X86_64
1368
+ #if defined(__ANDROID__)
1369
+ return true;
1370
+ #else
1371
+ return cpuinfo_isa.popcnt;
1372
+ #endif
1373
+ #elif CPUINFO_ARCH_X86
1374
+ return cpuinfo_isa.popcnt;
1375
+ #else
1376
+ return false;
1377
+ #endif
1378
+ }
1379
+
1380
+ static inline bool cpuinfo_has_x86_tbm(void) {
1381
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1382
+ return cpuinfo_isa.tbm;
1383
+ #else
1384
+ return false;
1385
+ #endif
1386
+ }
1387
+
1388
+ static inline bool cpuinfo_has_x86_bmi(void) {
1389
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1390
+ return cpuinfo_isa.bmi;
1391
+ #else
1392
+ return false;
1393
+ #endif
1394
+ }
1395
+
1396
+ static inline bool cpuinfo_has_x86_bmi2(void) {
1397
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1398
+ return cpuinfo_isa.bmi2;
1399
+ #else
1400
+ return false;
1401
+ #endif
1402
+ }
1403
+
1404
+ static inline bool cpuinfo_has_x86_adx(void) {
1405
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1406
+ return cpuinfo_isa.adx;
1407
+ #else
1408
+ return false;
1409
+ #endif
1410
+ }
1411
+
1412
+ static inline bool cpuinfo_has_x86_aes(void) {
1413
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1414
+ return cpuinfo_isa.aes;
1415
+ #else
1416
+ return false;
1417
+ #endif
1418
+ }
1419
+
1420
+ static inline bool cpuinfo_has_x86_vaes(void) {
1421
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1422
+ return cpuinfo_isa.vaes;
1423
+ #else
1424
+ return false;
1425
+ #endif
1426
+ }
1427
+
1428
+ static inline bool cpuinfo_has_x86_pclmulqdq(void) {
1429
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1430
+ return cpuinfo_isa.pclmulqdq;
1431
+ #else
1432
+ return false;
1433
+ #endif
1434
+ }
1435
+
1436
+ static inline bool cpuinfo_has_x86_vpclmulqdq(void) {
1437
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1438
+ return cpuinfo_isa.vpclmulqdq;
1439
+ #else
1440
+ return false;
1441
+ #endif
1442
+ }
1443
+
1444
+ static inline bool cpuinfo_has_x86_gfni(void) {
1445
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1446
+ return cpuinfo_isa.gfni;
1447
+ #else
1448
+ return false;
1449
+ #endif
1450
+ }
1451
+
1452
+ static inline bool cpuinfo_has_x86_rdrand(void) {
1453
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1454
+ return cpuinfo_isa.rdrand;
1455
+ #else
1456
+ return false;
1457
+ #endif
1458
+ }
1459
+
1460
+ static inline bool cpuinfo_has_x86_rdseed(void) {
1461
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1462
+ return cpuinfo_isa.rdseed;
1463
+ #else
1464
+ return false;
1465
+ #endif
1466
+ }
1467
+
1468
+ static inline bool cpuinfo_has_x86_sha(void) {
1469
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1470
+ return cpuinfo_isa.sha;
1471
+ #else
1472
+ return false;
1473
+ #endif
1474
+ }
1475
+
1476
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1477
+ /* This structure is not a part of stable API. Use cpuinfo_has_arm_* functions instead. */
1478
+ struct cpuinfo_arm_isa {
1479
+ #if CPUINFO_ARCH_ARM
1480
+ bool thumb;
1481
+ bool thumb2;
1482
+ bool thumbee;
1483
+ bool jazelle;
1484
+ bool armv5e;
1485
+ bool armv6;
1486
+ bool armv6k;
1487
+ bool armv7;
1488
+ bool armv7mp;
1489
+ bool armv8;
1490
+ bool idiv;
1491
+
1492
+ bool vfpv2;
1493
+ bool vfpv3;
1494
+ bool d32;
1495
+ bool fp16;
1496
+ bool fma;
1497
+
1498
+ bool wmmx;
1499
+ bool wmmx2;
1500
+ bool neon;
1501
+ #endif
1502
+ #if CPUINFO_ARCH_ARM64
1503
+ bool atomics;
1504
+ bool bf16;
1505
+ bool sve;
1506
+ bool sve2;
1507
+ bool i8mm;
1508
+ #endif
1509
+ bool rdm;
1510
+ bool fp16arith;
1511
+ bool dot;
1512
+ bool jscvt;
1513
+ bool fcma;
1514
+ bool fhm;
1515
+
1516
+ bool aes;
1517
+ bool sha1;
1518
+ bool sha2;
1519
+ bool pmull;
1520
+ bool crc32;
1521
+ };
1522
+
1523
+ extern struct cpuinfo_arm_isa cpuinfo_isa;
1524
+ #endif
1525
+
1526
+ static inline bool cpuinfo_has_arm_thumb(void) {
1527
+ #if CPUINFO_ARCH_ARM
1528
+ return cpuinfo_isa.thumb;
1529
+ #else
1530
+ return false;
1531
+ #endif
1532
+ }
1533
+
1534
+ static inline bool cpuinfo_has_arm_thumb2(void) {
1535
+ #if CPUINFO_ARCH_ARM
1536
+ return cpuinfo_isa.thumb2;
1537
+ #else
1538
+ return false;
1539
+ #endif
1540
+ }
1541
+
1542
+ static inline bool cpuinfo_has_arm_v5e(void) {
1543
+ #if CPUINFO_ARCH_ARM
1544
+ return cpuinfo_isa.armv5e;
1545
+ #else
1546
+ return false;
1547
+ #endif
1548
+ }
1549
+
1550
+ static inline bool cpuinfo_has_arm_v6(void) {
1551
+ #if CPUINFO_ARCH_ARM
1552
+ return cpuinfo_isa.armv6;
1553
+ #else
1554
+ return false;
1555
+ #endif
1556
+ }
1557
+
1558
+ static inline bool cpuinfo_has_arm_v6k(void) {
1559
+ #if CPUINFO_ARCH_ARM
1560
+ return cpuinfo_isa.armv6k;
1561
+ #else
1562
+ return false;
1563
+ #endif
1564
+ }
1565
+
1566
+ static inline bool cpuinfo_has_arm_v7(void) {
1567
+ #if CPUINFO_ARCH_ARM
1568
+ return cpuinfo_isa.armv7;
1569
+ #else
1570
+ return false;
1571
+ #endif
1572
+ }
1573
+
1574
+ static inline bool cpuinfo_has_arm_v7mp(void) {
1575
+ #if CPUINFO_ARCH_ARM
1576
+ return cpuinfo_isa.armv7mp;
1577
+ #else
1578
+ return false;
1579
+ #endif
1580
+ }
1581
+
1582
+ static inline bool cpuinfo_has_arm_v8(void) {
1583
+ #if CPUINFO_ARCH_ARM64
1584
+ return true;
1585
+ #elif CPUINFO_ARCH_ARM
1586
+ return cpuinfo_isa.armv8;
1587
+ #else
1588
+ return false;
1589
+ #endif
1590
+ }
1591
+
1592
+ static inline bool cpuinfo_has_arm_idiv(void) {
1593
+ #if CPUINFO_ARCH_ARM64
1594
+ return true;
1595
+ #elif CPUINFO_ARCH_ARM
1596
+ return cpuinfo_isa.idiv;
1597
+ #else
1598
+ return false;
1599
+ #endif
1600
+ }
1601
+
1602
+ static inline bool cpuinfo_has_arm_vfpv2(void) {
1603
+ #if CPUINFO_ARCH_ARM
1604
+ return cpuinfo_isa.vfpv2;
1605
+ #else
1606
+ return false;
1607
+ #endif
1608
+ }
1609
+
1610
+ static inline bool cpuinfo_has_arm_vfpv3(void) {
1611
+ #if CPUINFO_ARCH_ARM64
1612
+ return true;
1613
+ #elif CPUINFO_ARCH_ARM
1614
+ return cpuinfo_isa.vfpv3;
1615
+ #else
1616
+ return false;
1617
+ #endif
1618
+ }
1619
+
1620
+ static inline bool cpuinfo_has_arm_vfpv3_d32(void) {
1621
+ #if CPUINFO_ARCH_ARM64
1622
+ return true;
1623
+ #elif CPUINFO_ARCH_ARM
1624
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32;
1625
+ #else
1626
+ return false;
1627
+ #endif
1628
+ }
1629
+
1630
+ static inline bool cpuinfo_has_arm_vfpv3_fp16(void) {
1631
+ #if CPUINFO_ARCH_ARM64
1632
+ return true;
1633
+ #elif CPUINFO_ARCH_ARM
1634
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16;
1635
+ #else
1636
+ return false;
1637
+ #endif
1638
+ }
1639
+
1640
+ static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) {
1641
+ #if CPUINFO_ARCH_ARM64
1642
+ return true;
1643
+ #elif CPUINFO_ARCH_ARM
1644
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32;
1645
+ #else
1646
+ return false;
1647
+ #endif
1648
+ }
1649
+
1650
+ static inline bool cpuinfo_has_arm_vfpv4(void) {
1651
+ #if CPUINFO_ARCH_ARM64
1652
+ return true;
1653
+ #elif CPUINFO_ARCH_ARM
1654
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma;
1655
+ #else
1656
+ return false;
1657
+ #endif
1658
+ }
1659
+
1660
+ static inline bool cpuinfo_has_arm_vfpv4_d32(void) {
1661
+ #if CPUINFO_ARCH_ARM64
1662
+ return true;
1663
+ #elif CPUINFO_ARCH_ARM
1664
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32;
1665
+ #else
1666
+ return false;
1667
+ #endif
1668
+ }
1669
+
1670
+ static inline bool cpuinfo_has_arm_fp16_arith(void) {
1671
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1672
+ return cpuinfo_isa.fp16arith;
1673
+ #else
1674
+ return false;
1675
+ #endif
1676
+ }
1677
+
1678
+ static inline bool cpuinfo_has_arm_bf16(void) {
1679
+ #if CPUINFO_ARCH_ARM64
1680
+ return cpuinfo_isa.bf16;
1681
+ #else
1682
+ return false;
1683
+ #endif
1684
+ }
1685
+
1686
+ static inline bool cpuinfo_has_arm_wmmx(void) {
1687
+ #if CPUINFO_ARCH_ARM
1688
+ return cpuinfo_isa.wmmx;
1689
+ #else
1690
+ return false;
1691
+ #endif
1692
+ }
1693
+
1694
+ static inline bool cpuinfo_has_arm_wmmx2(void) {
1695
+ #if CPUINFO_ARCH_ARM
1696
+ return cpuinfo_isa.wmmx2;
1697
+ #else
1698
+ return false;
1699
+ #endif
1700
+ }
1701
+
1702
+ static inline bool cpuinfo_has_arm_neon(void) {
1703
+ #if CPUINFO_ARCH_ARM64
1704
+ return true;
1705
+ #elif CPUINFO_ARCH_ARM
1706
+ return cpuinfo_isa.neon;
1707
+ #else
1708
+ return false;
1709
+ #endif
1710
+ }
1711
+
1712
+ static inline bool cpuinfo_has_arm_neon_fp16(void) {
1713
+ #if CPUINFO_ARCH_ARM64
1714
+ return true;
1715
+ #elif CPUINFO_ARCH_ARM
1716
+ return cpuinfo_isa.neon && cpuinfo_isa.fp16;
1717
+ #else
1718
+ return false;
1719
+ #endif
1720
+ }
1721
+
1722
+ static inline bool cpuinfo_has_arm_neon_fma(void) {
1723
+ #if CPUINFO_ARCH_ARM64
1724
+ return true;
1725
+ #elif CPUINFO_ARCH_ARM
1726
+ return cpuinfo_isa.neon && cpuinfo_isa.fma;
1727
+ #else
1728
+ return false;
1729
+ #endif
1730
+ }
1731
+
1732
+ static inline bool cpuinfo_has_arm_neon_v8(void) {
1733
+ #if CPUINFO_ARCH_ARM64
1734
+ return true;
1735
+ #elif CPUINFO_ARCH_ARM
1736
+ return cpuinfo_isa.neon && cpuinfo_isa.armv8;
1737
+ #else
1738
+ return false;
1739
+ #endif
1740
+ }
1741
+
1742
+ static inline bool cpuinfo_has_arm_atomics(void) {
1743
+ #if CPUINFO_ARCH_ARM64
1744
+ return cpuinfo_isa.atomics;
1745
+ #else
1746
+ return false;
1747
+ #endif
1748
+ }
1749
+
1750
+ static inline bool cpuinfo_has_arm_neon_rdm(void) {
1751
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1752
+ return cpuinfo_isa.rdm;
1753
+ #else
1754
+ return false;
1755
+ #endif
1756
+ }
1757
+
1758
+ static inline bool cpuinfo_has_arm_neon_fp16_arith(void) {
1759
+ #if CPUINFO_ARCH_ARM
1760
+ return cpuinfo_isa.neon && cpuinfo_isa.fp16arith;
1761
+ #elif CPUINFO_ARCH_ARM64
1762
+ return cpuinfo_isa.fp16arith;
1763
+ #else
1764
+ return false;
1765
+ #endif
1766
+ }
1767
+
1768
+ static inline bool cpuinfo_has_arm_fhm(void) {
1769
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1770
+ return cpuinfo_isa.fhm;
1771
+ #else
1772
+ return false;
1773
+ #endif
1774
+ }
1775
+
1776
+ static inline bool cpuinfo_has_arm_neon_dot(void) {
1777
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1778
+ return cpuinfo_isa.dot;
1779
+ #else
1780
+ return false;
1781
+ #endif
1782
+ }
1783
+
1784
+ static inline bool cpuinfo_has_arm_neon_bf16(void) {
1785
+ #if CPUINFO_ARCH_ARM64
1786
+ return cpuinfo_isa.bf16;
1787
+ #else
1788
+ return false;
1789
+ #endif
1790
+ }
1791
+
1792
+ static inline bool cpuinfo_has_arm_jscvt(void) {
1793
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1794
+ return cpuinfo_isa.jscvt;
1795
+ #else
1796
+ return false;
1797
+ #endif
1798
+ }
1799
+
1800
+ static inline bool cpuinfo_has_arm_fcma(void) {
1801
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1802
+ return cpuinfo_isa.fcma;
1803
+ #else
1804
+ return false;
1805
+ #endif
1806
+ }
1807
+
1808
+ static inline bool cpuinfo_has_arm_i8mm(void) {
1809
+ #if CPUINFO_ARCH_ARM64
1810
+ return cpuinfo_isa.i8mm;
1811
+ #else
1812
+ return false;
1813
+ #endif
1814
+ }
1815
+
1816
+ static inline bool cpuinfo_has_arm_aes(void) {
1817
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1818
+ return cpuinfo_isa.aes;
1819
+ #else
1820
+ return false;
1821
+ #endif
1822
+ }
1823
+
1824
+ static inline bool cpuinfo_has_arm_sha1(void) {
1825
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1826
+ return cpuinfo_isa.sha1;
1827
+ #else
1828
+ return false;
1829
+ #endif
1830
+ }
1831
+
1832
+ static inline bool cpuinfo_has_arm_sha2(void) {
1833
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1834
+ return cpuinfo_isa.sha2;
1835
+ #else
1836
+ return false;
1837
+ #endif
1838
+ }
1839
+
1840
+ static inline bool cpuinfo_has_arm_pmull(void) {
1841
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1842
+ return cpuinfo_isa.pmull;
1843
+ #else
1844
+ return false;
1845
+ #endif
1846
+ }
1847
+
1848
+ static inline bool cpuinfo_has_arm_crc32(void) {
1849
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1850
+ return cpuinfo_isa.crc32;
1851
+ #else
1852
+ return false;
1853
+ #endif
1854
+ }
1855
+
1856
+ static inline bool cpuinfo_has_arm_sve(void) {
1857
+ #if CPUINFO_ARCH_ARM64
1858
+ return cpuinfo_isa.sve;
1859
+ #else
1860
+ return false;
1861
+ #endif
1862
+ }
1863
+
1864
+ static inline bool cpuinfo_has_arm_sve_bf16(void) {
1865
+ #if CPUINFO_ARCH_ARM64
1866
+ return cpuinfo_isa.sve && cpuinfo_isa.bf16;
1867
+ #else
1868
+ return false;
1869
+ #endif
1870
+ }
1871
+
1872
+ static inline bool cpuinfo_has_arm_sve2(void) {
1873
+ #if CPUINFO_ARCH_ARM64
1874
+ return cpuinfo_isa.sve2;
1875
+ #else
1876
+ return false;
1877
+ #endif
1878
+ }
1879
+
1880
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
1881
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
1882
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
1883
+ const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
1884
+ const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void);
1885
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
1886
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
1887
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void);
1888
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void);
1889
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);
1890
+
1891
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index);
1892
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
1893
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
1894
+ const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
1895
+ const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index);
1896
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
1897
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
1898
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index);
1899
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index);
1900
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);
1901
+
1902
+ uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
1903
+ uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
1904
+ uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
1905
+ uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
1906
+ uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void);
1907
+ uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
1908
+ uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
1909
+ uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
1910
+ uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
1911
+ uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
1912
+
1913
+ /**
1914
+ * Returns upper bound on cache size.
1915
+ */
1916
+ uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void);
1917
+
1918
+ /**
1919
+ * Identify the logical processor that executes the current thread.
1920
+ *
1921
+ * There is no guarantee that the thread will stay on the same logical processor for any time.
1922
+ * Callers should treat the result as only a hint, and be prepared to handle NULL return value.
1923
+ */
1924
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void);
1925
+
1926
+ /**
1927
+ * Identify the core that executes the current thread.
1928
+ *
1929
+ * There is no guarantee that the thread will stay on the same core for any time.
1930
+ * Callers should treat the result as only a hint, and be prepared to handle NULL return value.
1931
+ */
1932
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);
1933
+
1934
+ /**
1935
+ * Identify the microarchitecture index of the core that executes the current thread.
1936
+ * If the system does not support such identification, the function returns 0.
1937
+ *
1938
+ * There is no guarantee that the thread will stay on the same type of core for any time.
1939
+ * Callers should treat the result as only a hint.
1940
+ */
1941
+ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void);
1942
+
1943
+ /**
1944
+ * Identify the microarchitecture index of the core that executes the current thread.
1945
+ * If the system does not support such identification, the function returns the user-specified default value.
1946
+ *
1947
+ * There is no guarantee that the thread will stay on the same type of core for any time.
1948
+ * Callers should treat the result as only a hint.
1949
+ */
1950
+ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index);
1951
+
1952
+ #ifdef __cplusplus
1953
+ } /* extern "C" */
1954
+ #endif
1955
+
1956
+ #endif /* CPUINFO_H */
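A minimal usage sketch for the feature-query and topology API above (not part of the header). It assumes cpuinfo_initialize()/cpuinfo_deinitialize(), which cpuinfo.h declares earlier in the file, outside the excerpt shown here:

#include <stdio.h>
#include <cpuinfo.h>

int main(void) {
    /* Assumed: cpuinfo_initialize() populates the tables behind the accessors below. */
    if (!cpuinfo_initialize()) {
        fprintf(stderr, "cpuinfo initialization failed\n");
        return 1;
    }
    printf("cores: %u\n", cpuinfo_get_cores_count());
    printf("packages: %u\n", cpuinfo_get_packages_count());
    /* The ISA checks fold to compile-time constants wherever the target architecture fixes the answer. */
    printf("x86 BMI2: %d\n", (int) cpuinfo_has_x86_bmi2());
    printf("ARM NEON dot product: %d\n", (int) cpuinfo_has_arm_neon_dot());
    printf("current uarch index: %u\n", cpuinfo_get_current_uarch_index());
    cpuinfo_deinitialize();
    return 0;
}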
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_H
18
+ #define DNNL_H
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #endif /* DNNL_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_config.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_CONFIG_H
18
+ #define DNNL_CONFIG_H
19
+
20
+ #include "oneapi/dnnl/dnnl_config.h"
21
+
22
+ #endif /* DNNL_CONFIG_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_debug.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_DEBUG_H
18
+ #define DNNL_DEBUG_H
19
+
20
+ #include "oneapi/dnnl/dnnl_debug.h"
21
+
22
+ #endif /* DNNL_DEBUG_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_ocl.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_OCL_H
18
+ #define DNNL_OCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_ocl.h"
21
+
22
+ #endif /* DNNL_OCL_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_SYCL_H
18
+ #define DNNL_SYCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_sycl.h"
21
+
22
+ #endif /* DNNL_SYCL_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_SYCL_TYPES_H
18
+ #define DNNL_SYCL_TYPES_H
19
+
20
+ #include "oneapi/dnnl/dnnl_sycl_types.h"
21
+
22
+ #endif /* DNNL_SYCL_TYPES_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_THREADPOOL_H
18
+ #define DNNL_THREADPOOL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_threadpool.h"
21
+
22
+ #endif /* DNNL_THREADPOOL_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_types.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_TYPES_H
18
+ #define DNNL_TYPES_H
19
+
20
+ #include "oneapi/dnnl/dnnl_types.h"
21
+
22
+ #endif /* DNNL_TYPES_H */
llmeval-env/lib/python3.10/site-packages/torch/include/dnnl_version.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_VERSION_H
18
+ #define DNNL_VERSION_H
19
+
20
+ #include "oneapi/dnnl/dnnl_version.h"
21
+
22
+ #endif /* DNNL_VERSION_H */
llmeval-env/lib/python3.10/site-packages/torch/include/experiments-config.h ADDED
@@ -0,0 +1,25 @@
1
+ // Copyright 2023 Google LLC
2
+ //
3
+ // This source code is licensed under the BSD-style license found in the
4
+ // LICENSE file in the root directory of this source tree.
5
+
6
+ #pragma once
7
+
8
+ #include <stdbool.h>
9
+
10
+ #ifdef __cplusplus
11
+ extern "C" {
12
+ #endif
13
+
14
+ struct xnn_experiment_config {
15
+ bool adaptive_avx_optimization;
16
+ };
17
+
18
+ struct xnn_experiment_config* xnn_get_experiment_config();
19
+
20
+ void xnn_experiment_enable_adaptive_avx_optimization();
21
+
22
+
23
+ #ifdef __cplusplus
24
+ } // extern "C"
25
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/fp16.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+ #ifndef FP16_H
3
+ #define FP16_H
4
+
5
+ #include <fp16/fp16.h>
6
+
7
+ #if defined(PSIMD_H)
8
+ #include <fp16/psimd.h>
9
+ #endif
10
+
11
+ #endif /* FP16_H */
llmeval-env/lib/python3.10/site-packages/torch/include/fxdiv.h ADDED
@@ -0,0 +1,425 @@
1
+ #pragma once
2
+ #ifndef FXDIV_H
3
+ #define FXDIV_H
4
+
5
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
6
+ #include <cstddef>
7
+ #include <cstdint>
8
+ #include <climits>
9
+ #elif !defined(__OPENCL_VERSION__)
10
+ #include <stddef.h>
11
+ #include <stdint.h>
12
+ #include <limits.h>
13
+ #endif
14
+
15
+ #if defined(_MSC_VER)
16
+ #include <intrin.h>
17
+ #if defined(_M_IX86) || defined(_M_X64)
18
+ #include <immintrin.h>
19
+ #endif
20
+ #endif
21
+
22
+ #ifndef FXDIV_USE_INLINE_ASSEMBLY
23
+ #define FXDIV_USE_INLINE_ASSEMBLY 0
24
+ #endif
25
+
26
+ static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) {
27
+ #if defined(_MSC_VER) && defined(_M_IX86)
28
+ return (uint64_t) __emulu((unsigned int) a, (unsigned int) b);
29
+ #else
30
+ return (uint64_t) a * (uint64_t) b;
31
+ #endif
32
+ }
33
+
34
+ static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) {
35
+ #if defined(__OPENCL_VERSION__)
36
+ return mul_hi(a, b);
37
+ #elif defined(__CUDA_ARCH__)
38
+ return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b);
39
+ #elif defined(_MSC_VER) && defined(_M_IX86)
40
+ return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32);
41
+ #elif defined(_MSC_VER) && defined(_M_ARM)
42
+ return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b);
43
+ #else
44
+ return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32);
45
+ #endif
46
+ }
47
+
48
+ static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) {
49
+ #if defined(__OPENCL_VERSION__)
50
+ return mul_hi(a, b);
51
+ #elif defined(__CUDA_ARCH__)
52
+ return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b);
53
+ #elif defined(_MSC_VER) && defined(_M_X64)
54
+ return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b);
55
+ #elif defined(__GNUC__) && defined(__SIZEOF_INT128__)
56
+ return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64);
57
+ #else
58
+ const uint32_t a_lo = (uint32_t) a;
59
+ const uint32_t a_hi = (uint32_t) (a >> 32);
60
+ const uint32_t b_lo = (uint32_t) b;
61
+ const uint32_t b_hi = (uint32_t) (b >> 32);
62
+
63
+ const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) +
64
+ (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo);
65
+ return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) +
66
+ ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32);
67
+ #endif
68
+ }
69
+
70
+ static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) {
71
+ #if SIZE_MAX == UINT32_MAX
72
+ return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b);
73
+ #elif SIZE_MAX == UINT64_MAX
74
+ return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b);
75
+ #else
76
+ #error Unsupported platform
77
+ #endif
78
+ }
79
+
80
+ struct fxdiv_divisor_uint32_t {
81
+ uint32_t value;
82
+ uint32_t m;
83
+ uint8_t s1;
84
+ uint8_t s2;
85
+ };
86
+
87
+ struct fxdiv_result_uint32_t {
88
+ uint32_t quotient;
89
+ uint32_t remainder;
90
+ };
91
+
92
+ struct fxdiv_divisor_uint64_t {
93
+ uint64_t value;
94
+ uint64_t m;
95
+ uint8_t s1;
96
+ uint8_t s2;
97
+ };
98
+
99
+ struct fxdiv_result_uint64_t {
100
+ uint64_t quotient;
101
+ uint64_t remainder;
102
+ };
103
+
104
+ struct fxdiv_divisor_size_t {
105
+ size_t value;
106
+ size_t m;
107
+ uint8_t s1;
108
+ uint8_t s2;
109
+ };
110
+
111
+ struct fxdiv_result_size_t {
112
+ size_t quotient;
113
+ size_t remainder;
114
+ };
115
+
116
+ static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) {
117
+ struct fxdiv_divisor_uint32_t result = { d };
118
+ if (d == 1) {
119
+ result.m = UINT32_C(1);
120
+ result.s1 = 0;
121
+ result.s2 = 0;
122
+ } else {
123
+ #if defined(__OPENCL_VERSION__)
124
+ const uint32_t l_minus_1 = 31 - clz(d - 1);
125
+ #elif defined(__CUDA_ARCH__)
126
+ const uint32_t l_minus_1 = 31 - __clz((int) (d - 1));
127
+ #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64))
128
+ unsigned long l_minus_1;
129
+ _BitScanReverse(&l_minus_1, (unsigned long) (d - 1));
130
+ #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY
131
+ uint32_t l_minus_1;
132
+ __asm__("BSRL %[d_minus_1], %[l_minus_1]"
133
+ : [l_minus_1] "=r" (l_minus_1)
134
+ : [d_minus_1] "r" (d - 1)
135
+ : "cc");
136
+ #elif defined(__GNUC__)
137
+ const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1);
138
+ #else
139
+ /* Based on Algorithm 2 from Hacker's delight */
140
+
141
+ uint32_t l_minus_1 = 0;
142
+ uint32_t x = d - 1;
143
+ uint32_t y = x >> 16;
144
+ if (y != 0) {
145
+ l_minus_1 += 16;
146
+ x = y;
147
+ }
148
+ y = x >> 8;
149
+ if (y != 0) {
150
+ l_minus_1 += 8;
151
+ x = y;
152
+ }
153
+ y = x >> 4;
154
+ if (y != 0) {
155
+ l_minus_1 += 4;
156
+ x = y;
157
+ }
158
+ y = x >> 2;
159
+ if (y != 0) {
160
+ l_minus_1 += 2;
161
+ x = y;
162
+ }
163
+ if ((x & 2) != 0) {
164
+ l_minus_1 += 1;
165
+ }
166
+ #endif
167
+ uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d;
168
+
169
+ /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */
170
+ #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY
171
+ uint32_t q;
172
+ __asm__("DIVL %[d]"
173
+ : "=a" (q), "+d" (u_hi)
174
+ : [d] "r" (d), "a" (0)
175
+ : "cc");
176
+ #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64))
177
+ unsigned int remainder;
178
+ const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder);
179
+ #else
180
+ const uint32_t q = ((uint64_t) u_hi << 32) / d;
181
+ #endif
182
+
183
+ result.m = q + UINT32_C(1);
184
+ result.s1 = 1;
185
+ result.s2 = (uint8_t) l_minus_1;
186
+ }
187
+ return result;
188
+ }
189
+
190
+ static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) {
191
+ struct fxdiv_divisor_uint64_t result = { d };
192
+ if (d == 1) {
193
+ result.m = UINT64_C(1);
194
+ result.s1 = 0;
195
+ result.s2 = 0;
196
+ } else {
197
+ #if defined(__OPENCL_VERSION__)
198
+ const uint32_t nlz_d = clz(d);
199
+ const uint32_t l_minus_1 = 63 - clz(d - 1);
200
+ #elif defined(__CUDA_ARCH__)
201
+ const uint32_t nlz_d = __clzll((long long) d);
202
+ const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1));
203
+ #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
204
+ unsigned long l_minus_1;
205
+ _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1));
206
+ unsigned long bsr_d;
207
+ _BitScanReverse64(&bsr_d, (unsigned __int64) d);
208
+ const uint32_t nlz_d = bsr_d ^ 0x3F;
209
+ #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM))
210
+ const uint64_t d_minus_1 = d - 1;
211
+ const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0;
212
+ unsigned long l_minus_1;
213
+ if ((uint32_t) (d_minus_1 >> 32) == 0) {
214
+ _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1);
215
+ } else {
216
+ _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32));
217
+ l_minus_1 += 32;
218
+ }
219
+ const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2;
220
+ #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
221
+ uint64_t l_minus_1;
222
+ __asm__("BSRQ %[d_minus_1], %[l_minus_1]"
223
+ : [l_minus_1] "=r" (l_minus_1)
224
+ : [d_minus_1] "r" (d - 1)
225
+ : "cc");
226
+ #elif defined(__GNUC__)
227
+ const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1);
228
+ const uint32_t nlz_d = __builtin_clzll(d);
229
+ #else
230
+ /* Based on Algorithm 2 from Hacker's delight */
231
+ const uint64_t d_minus_1 = d - 1;
232
+ const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0;
233
+ uint32_t l_minus_1 = 0;
234
+ uint32_t x = (uint32_t) d_minus_1;
235
+ uint32_t y = d_minus_1 >> 32;
236
+ if (y != 0) {
237
+ l_minus_1 += 32;
238
+ x = y;
239
+ }
240
+ y = x >> 16;
241
+ if (y != 0) {
242
+ l_minus_1 += 16;
243
+ x = y;
244
+ }
245
+ y = x >> 8;
246
+ if (y != 0) {
247
+ l_minus_1 += 8;
248
+ x = y;
249
+ }
250
+ y = x >> 4;
251
+ if (y != 0) {
252
+ l_minus_1 += 4;
253
+ x = y;
254
+ }
255
+ y = x >> 2;
256
+ if (y != 0) {
257
+ l_minus_1 += 2;
258
+ x = y;
259
+ }
260
+ if ((x & 2) != 0) {
261
+ l_minus_1 += 1;
262
+ }
263
+ const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2;
264
+ #endif
265
+ uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d;
266
+
267
+ /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */
268
+ #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
269
+ uint64_t q;
270
+ __asm__("DIVQ %[d]"
271
+ : "=a" (q), "+d" (u_hi)
272
+ : [d] "r" (d), "a" (UINT64_C(0))
273
+ : "cc");
274
+ #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__)
275
+ /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */
276
+ const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d));
277
+ #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64)
278
+ unsigned __int64 remainder;
279
+ const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder);
280
+ #else
281
+ /* Implementation based on code from Hacker's delight */
282
+
283
+ /* Normalize divisor and shift dividend left */
284
+ d <<= nlz_d;
285
+ u_hi <<= nlz_d;
286
+ /* Break divisor up into two 32-bit digits */
287
+ const uint64_t d_hi = (uint32_t) (d >> 32);
288
+ const uint32_t d_lo = (uint32_t) d;
289
+
290
+ /* Compute the first quotient digit, q1 */
291
+ uint64_t q1 = u_hi / d_hi;
292
+ uint64_t r1 = u_hi - q1 * d_hi;
293
+
294
+ while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) {
295
+ q1 -= 1;
296
+ r1 += d_hi;
297
+ if ((r1 >> 32) != 0) {
298
+ break;
299
+ }
300
+ }
301
+
302
+ /* Multiply and subtract. */
303
+ u_hi = (u_hi << 32) - q1 * d;
304
+
305
+ /* Compute the second quotient digit, q0 */
306
+ uint64_t q0 = u_hi / d_hi;
307
+ uint64_t r0 = u_hi - q0 * d_hi;
308
+
309
+ while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) {
310
+ q0 -= 1;
311
+ r0 += d_hi;
312
+ if ((r0 >> 32) != 0) {
313
+ break;
314
+ }
315
+ }
316
+ const uint64_t q = (q1 << 32) | (uint32_t) q0;
317
+ #endif
318
+ result.m = q + UINT64_C(1);
319
+ result.s1 = 1;
320
+ result.s2 = (uint8_t) l_minus_1;
321
+ }
322
+ return result;
323
+ }
324
+
325
+ static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) {
326
+ #if SIZE_MAX == UINT32_MAX
327
+ const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d);
328
+ #elif SIZE_MAX == UINT64_MAX
329
+ const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d);
330
+ #else
331
+ #error Unsupported platform
332
+ #endif
333
+ struct fxdiv_divisor_size_t size_result = {
334
+ (size_t) uint_result.value,
335
+ (size_t) uint_result.m,
336
+ uint_result.s1,
337
+ uint_result.s2
338
+ };
339
+ return size_result;
340
+ }
341
+
342
+ static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
343
+ const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m);
344
+ return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
345
+ }
346
+
347
+ static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
348
+ const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m);
349
+ return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
350
+ }
351
+
352
+ static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
353
+ #if SIZE_MAX == UINT32_MAX
354
+ const struct fxdiv_divisor_uint32_t uint32_divisor = {
355
+ (uint32_t) divisor.value,
356
+ (uint32_t) divisor.m,
357
+ divisor.s1,
358
+ divisor.s2
359
+ };
360
+ return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor);
361
+ #elif SIZE_MAX == UINT64_MAX
362
+ const struct fxdiv_divisor_uint64_t uint64_divisor = {
363
+ (uint64_t) divisor.value,
364
+ (uint64_t) divisor.m,
365
+ divisor.s1,
366
+ divisor.s2
367
+ };
368
+ return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor);
369
+ #else
370
+ #error Unsupported platform
371
+ #endif
372
+ }
373
+
374
+ static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
375
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
376
+ return n - quotient * divisor.value;
377
+ }
378
+
379
+ static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
380
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
381
+ return n - quotient * divisor.value;
382
+ }
383
+
384
+ static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
385
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
386
+ return n - quotient * divisor.value;
387
+ }
388
+
389
+ static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) {
390
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity);
391
+ return quotient * granularity.value;
392
+ }
393
+
394
+ static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) {
395
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity);
396
+ return quotient * granularity.value;
397
+ }
398
+
399
+ static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) {
400
+ const size_t quotient = fxdiv_quotient_size_t(n, granularity);
401
+ return quotient * granularity.value;
402
+ }
403
+
404
+ static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
405
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
406
+ const uint32_t remainder = n - quotient * divisor.value;
407
+ struct fxdiv_result_uint32_t result = { quotient, remainder };
408
+ return result;
409
+ }
410
+
411
+ static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
412
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
413
+ const uint64_t remainder = n - quotient * divisor.value;
414
+ struct fxdiv_result_uint64_t result = { quotient, remainder };
415
+ return result;
416
+ }
417
+
418
+ static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
419
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
420
+ const size_t remainder = n - quotient * divisor.value;
421
+ struct fxdiv_result_size_t result = { quotient, remainder };
422
+ return result;
423
+ }
424
+
425
+ #endif /* FXDIV_H */
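A short usage sketch for fxdiv.h above (not part of the header): the point of the API is to pay for one real division in fxdiv_init_uint32_t and then reuse the precomputed divisor for many cheap multiply-and-shift divisions:

#include <stdio.h>
#include <stdint.h>
#include "fxdiv.h"

int main(void) {
    /* Precompute the magic multiplier and shift amounts for division by 7 once. */
    const struct fxdiv_divisor_uint32_t seven = fxdiv_init_uint32_t(7);
    for (uint32_t n = 0; n < 20; n++) {
        /* Quotient and remainder without a hardware divide on the hot path. */
        const struct fxdiv_result_uint32_t qr = fxdiv_divide_uint32_t(n, seven);
        printf("%2u = 7 * %u + %u\n", n, qr.quotient, qr.remainder);
    }
    return 0;
}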
llmeval-env/lib/python3.10/site-packages/torch/include/libshm.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <ATen/MapAllocator.h>
4
+
5
+ #ifdef __cplusplus
6
+
7
+ void libshm_init(const char* manager_exec_path);
8
+
9
+ // Superclass to run a constructor before at::RefcountedMapAllocator
10
+ class THManagedMapAllocatorInit {
11
+ protected:
12
+ THManagedMapAllocatorInit(const char* manager_handle, const char* filename);
13
+ std::string manager_handle_;
14
+ };
15
+
16
+ // Like a at::RefcountedMapAllocator, but it also makes use of an external
17
+ // shared memory manager process to ensure that shared memory regions actually
18
+ // get freed in the end (even if processes lose the memory).
19
+ class THManagedMapAllocator : private THManagedMapAllocatorInit,
20
+ public at::RefcountedMapAllocator {
21
+ public:
22
+ THManagedMapAllocator(
23
+ const char* manager_handle,
24
+ const char* filename,
25
+ int flags,
26
+ size_t size);
27
+
28
+ void close() override;
29
+
30
+ ~THManagedMapAllocator() override {
31
+ close();
32
+ }
33
+
34
+ static at::DataPtr makeDataPtr(
35
+ const char* manager_handle,
36
+ const char* filename,
37
+ int flags,
38
+ size_t size);
39
+ static THManagedMapAllocator* fromDataPtr(const at::DataPtr&);
40
+
41
+ const char* manager_handle() const {
42
+ return manager_handle_.c_str();
43
+ }
44
+ };
45
+
46
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/nnpack.h ADDED
@@ -0,0 +1,659 @@
1
+ #pragma once
2
+
3
+ #include <stddef.h>
4
+ #include <stdint.h>
5
+ #include <stdbool.h>
6
+
7
+ #include <pthreadpool.h>
8
+
9
+ #ifdef __cplusplus
10
+ extern "C" {
11
+ #endif
12
+
13
+ /**
14
+ * @brief Status code for any NNPACK function call.
15
+ */
16
+ enum nnp_status {
17
+ /** The call succeeded, and all output arguments now contain valid data. */
18
+ nnp_status_success = 0,
19
+ /** NNPACK function was called with batch_size == 0. */
20
+ nnp_status_invalid_batch_size = 2,
21
+ /** NNPACK function was called with channels == 0. */
22
+ nnp_status_invalid_channels = 3,
23
+ /** NNPACK function was called with input_channels == 0. */
24
+ nnp_status_invalid_input_channels = 4,
25
+ /** NNPACK function was called with output_channels == 0. */
26
+ nnp_status_invalid_output_channels = 5,
27
+ /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */
28
+ nnp_status_invalid_input_size = 10,
29
+ /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */
30
+ nnp_status_invalid_input_stride = 11,
31
+ /** NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.:
32
+ *
33
+ * - input_padding.left >= kernel_size.width (>= pooling_size.width)
34
+ * - input_padding.right >= kernel_size.width (>= pooling_size.width)
35
+ * - input_padding.top >= kernel_size.height (>= pooling_size.height)
36
+ * - input_padding.bottom >= kernel_size.height (>= pooling_size.height)
37
+ */
38
+ nnp_status_invalid_input_padding = 12,
39
+ /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */
40
+ nnp_status_invalid_kernel_size = 13,
41
+ /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */
42
+ nnp_status_invalid_pooling_size = 14,
43
+ /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */
44
+ nnp_status_invalid_pooling_stride = 15,
45
+ /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */
46
+ nnp_status_invalid_algorithm = 16,
47
+ /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */
48
+ nnp_status_invalid_transform_strategy = 17,
49
+ /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */
50
+ nnp_status_invalid_output_subsampling = 13,
51
+ /** NNPACK function was called with activation not in nnp_activation enum */
52
+ nnp_status_invalid_activation = 14,
53
+ /** NNPACK function was called with invalid activation parameters */
54
+ nnp_status_invalid_activation_parameters = 15,
55
+
56
+ /** NNPACK does not support the particular input size for the function */
57
+ nnp_status_unsupported_input_size = 20,
58
+ /** NNPACK does not support the particular input stride for the function */
59
+ nnp_status_unsupported_input_stride = 21,
60
+ /** NNPACK does not support the particular input padding for the function */
61
+ nnp_status_unsupported_input_padding = 22,
62
+ /** NNPACK does not support the particular kernel size for the function */
63
+ nnp_status_unsupported_kernel_size = 23,
64
+ /** NNPACK does not support the particular pooling size for the function */
65
+ nnp_status_unsupported_pooling_size = 24,
66
+ /** NNPACK does not support the particular pooling stride for the function */
67
+ nnp_status_unsupported_pooling_stride = 25,
68
+ /** NNPACK does not support the particular convolution algorithm for the function */
69
+ nnp_status_unsupported_algorithm = 26,
70
+ /** NNPACK does not support the particular convolution transform strategy for the algorithm */
71
+ nnp_status_unsupported_transform_strategy = 27,
72
+ /** NNPACK does not support the particular activation function for the function */
73
+ nnp_status_unsupported_activation = 28,
74
+ /** NNPACK does not support the particular activation function parameters for the function */
75
+ nnp_status_unsupported_activation_parameters = 29,
76
+
77
+ /** NNPACK function was called before the library was initialized */
78
+ nnp_status_uninitialized = 50,
79
+ /** NNPACK does not implement this function for the host CPU */
80
+ nnp_status_unsupported_hardware = 51,
81
+ /** NNPACK failed to allocate memory for temporary buffers */
82
+ nnp_status_out_of_memory = 52,
83
+ /** Scratch space buffer is too small */
84
+ nnp_status_insufficient_buffer = 53,
85
+ /** Scratch space buffer is not properly aligned */
86
+ nnp_status_misaligned_buffer = 54
87
+ };
88
+
89
+ /**
90
+ * @brief Activation applied after a convolutional or fully-connected layer.
91
+ */
92
+ enum nnp_activation {
93
+ /** Identity activation f(x) := x, i.e. no transformation */
94
+ nnp_activation_identity = 0,
95
+ /** ReLU activation f(x) := max(0, x) */
96
+ nnp_activation_relu = 1,
97
+ };
98
+
99
+ /**
100
+ * @brief Algorithm for computing convolutional layers.
101
+ */
102
+ enum nnp_convolution_algorithm {
103
+ /** Let NNPACK choose the algorithm depending on layer parameters */
104
+ nnp_convolution_algorithm_auto = 0,
105
+ /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */
106
+ nnp_convolution_algorithm_ft8x8 = 1,
107
+ /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */
108
+ nnp_convolution_algorithm_ft16x16 = 2,
109
+ /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */
110
+ nnp_convolution_algorithm_wt8x8 = 3,
111
+ /** Direct convolution via implicit GEMM. */
112
+ nnp_convolution_algorithm_implicit_gemm = 4,
113
+ /** Direct convolution implementation. */
114
+ nnp_convolution_algorithm_direct = 5,
115
+ /**
116
+ * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16.
117
+ * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP),
118
+ * on non-supported processors falls back to nnp_convolution_algorithm_wt8x8.
119
+ */
120
+ nnp_convolution_algorithm_wt8x8_fp16 = 6,
121
+ };
122
+
123
+ enum nnp_convolution_transform_strategy {
124
+ nnp_convolution_transform_strategy_compute = 1,
125
+ nnp_convolution_transform_strategy_precompute = 2,
126
+ nnp_convolution_transform_strategy_reuse = 3
127
+ };
128
+
129
+ /* For backward compatibility */
130
+ #define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute
131
+ #define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute
132
+
133
+ /**
134
+ * @brief Size of images, kernels, and pooling filters in NNPACK.
135
+ */
136
+ struct nnp_size {
137
+ /** Width (horizontal size) of an image, kernel, or pooling filter. */
138
+ size_t width;
139
+ /** Height (vertical size) of an image, kernel, or pooling filter. */
140
+ size_t height;
141
+ };
142
+
143
+ /**
144
+ * @brief Padding of images in NNPACK.
145
+ */
146
+ struct nnp_padding {
147
+ /** Padding above the image data */
148
+ size_t top;
149
+ /** Padding on the right of image data */
150
+ size_t right;
151
+ /** Padding below the image data */
152
+ size_t bottom;
153
+ /** Padding on the left of image data */
154
+ size_t left;
155
+ };
156
+
157
+ /**
158
+ * @brief Profiling information about time spent in different phases of a function call.
159
+ */
160
+ struct nnp_profile {
161
+ /** Time spent inside the function call, in seconds. */
162
+ double total;
163
+ /** Time spent on transformation of the input or input gradient tensor, in seconds. */
164
+ double input_transform;
165
+ /** Time spent on transformation of the kernel or kernel gradient tensor, in seconds. */
166
+ double kernel_transform;
167
+ /** Time spent on transformation of the output or output gradient tensor, in seconds. */
168
+ double output_transform;
169
+ /** Time spent on multiplication-accumulation of transformed coefficients, in seconds. */
170
+ double block_multiplication;
171
+ };
172
+
173
+ enum nnp_status nnp_initialize(void);
174
+
175
+ enum nnp_status nnp_deinitialize(void);
176
+
177
+ /**
178
+ * @brief Computes output of a 2D convolutional layer from input and kernel tensors.
179
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
180
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
181
+ * For minibatch size 1, use nnp_convolution_inference for optimal performance.
182
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
183
+ *
184
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
185
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
186
+ * Supports kernels up to 8x8.
187
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
188
+ * Supports kernels up to 16x16.
189
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
190
+ * Supports only 3x3 kernels.
191
+ *
192
+ * @param batch_size The number of images on the input and output of the convolutional layer.
193
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images.
194
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images.
195
+ * @param input_size Size of input images, excluding implicit zero-padding.
196
+ * @param input_padding Implicit zero-padding of input images.
197
+ * @param kernel_size Kernel size.
198
+ * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width].
199
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
200
+ * @param[in] bias A 1D array bias[output_channels].
201
+ * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where
202
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
203
+ * (kernel_size.height - 1)
204
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
205
+ * (kernel_size.width - 1)
206
+ * @param threadpool A thread pool for parallelization of the computation.
207
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
208
+ * @param[out] profile An optional pointer to profiling structure.
209
+ * If provided, the structure would record time spent in different phases of the computation.
210
+ */
211
+
212
+ enum nnp_status nnp_convolution_output(
213
+ enum nnp_convolution_algorithm algorithm,
214
+ size_t batch_size,
215
+ size_t input_channels,
216
+ size_t output_channels,
217
+ struct nnp_size input_size,
218
+ struct nnp_padding input_padding,
219
+ struct nnp_size kernel_size,
220
+ const float* input,
221
+ const float* kernel,
222
+ const float* bias,
223
+ float* output,
224
+ void* workspace_buffer,
225
+ size_t* workspace_size,
226
+ enum nnp_activation activation,
227
+ const void* activation_parameters,
228
+ pthreadpool_t threadpool,
229
+ struct nnp_profile* profile);
230
+
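A hedged usage sketch for nnp_convolution_output (not part of the header). The layer shapes, the NULL workspace arguments (assumed here to make NNPACK allocate its temporaries internally), and the NULL threadpool (single-threaded, per the comment above) are all illustrative assumptions:

#include <stddef.h>
#include <nnpack.h>

/* With a 16x16 input, 1-pixel padding on every side, and a 3x3 kernel, the formula above gives
 * output_size.height = (1 + 16 + 1) - (3 - 1) = 16, and likewise for the width. */
static enum nnp_status run_forward_conv(
    const float* input, const float* kernel, const float* bias, float* output) {
  enum nnp_status status = nnp_initialize();
  if (status != nnp_status_success) {
    return status;
  }
  const struct nnp_size input_size = { .width = 16, .height = 16 };
  const struct nnp_padding input_padding = { .top = 1, .right = 1, .bottom = 1, .left = 1 };
  const struct nnp_size kernel_size = { .width = 3, .height = 3 };
  status = nnp_convolution_output(
      nnp_convolution_algorithm_auto,
      /*batch_size=*/64, /*input_channels=*/32, /*output_channels=*/64,
      input_size, input_padding, kernel_size,
      input, kernel, bias, output,
      /*workspace_buffer=*/NULL, /*workspace_size=*/NULL, /* assumption: internal allocation */
      nnp_activation_identity, /*activation_parameters=*/NULL,
      /*threadpool=*/NULL, /*profile=*/NULL);
  nnp_deinitialize();
  return status;
}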
231
+ /**
232
+ * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors.
233
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
234
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
235
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
236
+ *
237
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
238
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
239
+ * Supports kernels up to 8x8.
240
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
241
+ * Supports kernels up to 16x16.
242
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
243
+ * Supports only 3x3 kernels.
244
+ *
245
+ * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer.
246
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients).
247
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients).
248
+ * @param input_size Size of input images and their gradients, excluding implicit zero-padding.
249
+ * @param input_padding Implicit zero-padding of input images.
250
+ * @param kernel_size Kernel size.
251
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width]
252
+ * where
253
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
254
+ * (kernel_size.height - 1)
255
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
256
+ * (kernel_size.width - 1)
257
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
258
+ * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width].
259
+ * @param threadpool A thread pool for parallelization of the computation.
260
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
261
+ * @param[out] profile An optional pointer to profiling structure.
262
+ * If provided, the structure would record time spent in different phases of the computation.
263
+ */
264
+ enum nnp_status nnp_convolution_input_gradient(
265
+ enum nnp_convolution_algorithm algorithm,
266
+ size_t batch_size,
267
+ size_t input_channels,
268
+ size_t output_channels,
269
+ struct nnp_size input_size,
270
+ struct nnp_padding input_padding,
271
+ struct nnp_size kernel_size,
272
+ const float* grad_output,
273
+ const float* kernel,
274
+ float* grad_input,
275
+ void* workspace_buffer,
276
+ size_t* workspace_size,
277
+ enum nnp_activation activation,
278
+ const void* activation_parameters,
279
+ pthreadpool_t threadpool,
280
+ struct nnp_profile* profile);
281
+
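+ /*
+  * Illustrative sketch (not part of the original header): the backward-data pass
+  * mirrors the forward call above, consuming grad_output and kernel and producing
+  * grad_input with the same shape as the forward input. Shapes are hypothetical.
+  *
+  *   nnp_convolution_input_gradient(
+  *       nnp_convolution_algorithm_auto,
+  *       64, 16, 32, input_size, pad, kernel_size,
+  *       grad_output, kernel, grad_input,
+  *       NULL, NULL, nnp_activation_identity, NULL, threadpool, NULL);
+  */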
282
+ /**
283
+ * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors.
284
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
285
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
286
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
287
+ *
288
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
289
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
290
+ * Supports kernels up to 8x8.
291
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
292
+ * Supports kernels up to 16x16.
293
+ *
294
+ * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer.
295
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images.
296
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients).
297
+ * @param input_size Size of input images and their gradients, excluding implicit zero-padding.
298
+ * @param input_padding Implicit zero-padding of input images.
299
+ * @param kernel_size Kernel size.
300
+ * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width].
301
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width]
302
+ * where
303
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
304
+ * (kernel_size.height - 1)
305
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
306
+ * (kernel_size.width - 1)
307
+ * @param[out] grad_kernel A 4D tensor
308
+ * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
309
+ * @param threadpool A thread pool for parallelization of the computation.
310
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
311
+ * @param[out] profile An optional pointer to profiling structure.
312
+ * If provided, the structure would record time spent in different phases of the computation.
313
+ */
314
+ enum nnp_status nnp_convolution_kernel_gradient(
315
+ enum nnp_convolution_algorithm algorithm,
316
+ size_t batch_size,
317
+ size_t input_channels,
318
+ size_t output_channels,
319
+ struct nnp_size input_size,
320
+ struct nnp_padding input_padding,
321
+ struct nnp_size kernel_size,
322
+ const float* input,
323
+ const float* grad_output,
324
+ float* grad_kernel,
325
+ void* workspace_buffer,
326
+ size_t* workspace_size,
327
+ enum nnp_activation activation,
328
+ const void* activation_parameters,
329
+ pthreadpool_t threadpool,
330
+ struct nnp_profile* profile);
331
+
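+ /*
+  * Illustrative sketch (not part of the original header): the backward-weights pass
+  * takes the forward input and grad_output and produces grad_kernel with the same
+  * shape as the forward kernel. Shapes are hypothetical.
+  *
+  *   nnp_convolution_kernel_gradient(
+  *       nnp_convolution_algorithm_auto,
+  *       64, 16, 32, input_size, pad, kernel_size,
+  *       input, grad_output, grad_kernel,
+  *       NULL, NULL, nnp_activation_identity, NULL, threadpool, NULL);
+  */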
332
+ /**
333
+ * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor.
334
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
335
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
336
+ *
337
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
338
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
339
+ * Supports kernels up to 8x8.
340
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
341
+ * Supports kernels up to 16x16.
342
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
343
+ * Supports only 3x3 kernels.
344
+ *
345
+ * @param transform_strategy A strategy that guides computation of kernel transform coefficients.
346
+ * Possible values are:
347
+ *
348
+ * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed
349
+ * coefficients.
350
+ * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed
351
+ * coefficients.
352
+ *
353
+ * @param input_channels The number of channels (AKA features, dimensions) in the input image.
354
+ * @param output_channels The number of channels (AKA features, dimensions) in the output image.
355
+ * @param input_size Size of input image, excluding implicit zero-padding.
356
+ * @param input_padding Implicit zero-padding of input image.
357
+ * @param kernel_size Kernel size.
358
+ * @param output_subsampling Subsample region for output, also known as convolution stride.
359
+ * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width].
360
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
361
+ * @param[in] bias A 1D array bias[output_channels].
362
+ * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where
363
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
364
+ * (kernel_size.height - 1)
365
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
366
+ * (kernel_size.width - 1)
367
+ * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes.
368
+ * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size
369
+ * of required workspace memory at the workspace_size location, and exit without
370
+ * computations.
371
+ * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory
372
+ * before and deallocate after this computation, potentially at significant runtime cost.
373
+ * @param[in,out] workspace_size Pointer to the size of workspace buffer.
374
+ * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to
375
+ * the location specified by this pointer.
376
+ * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of
377
+ * the buffer, in bytes.
378
+ * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK
379
+ * would allocate memory before and deallocate after this computation, potentially at
380
+ * significant runtime cost.
381
+ * @param threadpool A thread pool for parallelization of the computation.
382
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
383
+ * @param[out] profile An optional pointer to profiling structure.
384
+ * If provided, the structure would record time spent in different phases of the computation.
385
+ */
386
+ enum nnp_status nnp_convolution_inference(
387
+ enum nnp_convolution_algorithm algorithm,
388
+ enum nnp_convolution_transform_strategy transform_strategy,
389
+ size_t input_channels,
390
+ size_t output_channels,
391
+ struct nnp_size input_size,
392
+ struct nnp_padding input_padding,
393
+ struct nnp_size kernel_size,
394
+ struct nnp_size output_subsampling,
395
+ const float* input,
396
+ const float* kernel,
397
+ const float* bias,
398
+ float* output,
399
+ void* workspace_buffer,
400
+ size_t* workspace_size,
401
+ enum nnp_activation activation,
402
+ const void* activation_parameters,
403
+ pthreadpool_t threadpool,
404
+ struct nnp_profile* profile);
405
+
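+ /*
+  * Illustrative sketch (not part of the original header) of the two-pass workspace
+  * protocol documented above: first query the required scratch size with a NULL
+  * buffer, then allocate a 64-byte-aligned buffer and run the convolution. Shapes
+  * and the aligned_alloc-based allocation are hypothetical.
+  *
+  *   size_t workspace_size = 0;
+  *   nnp_convolution_inference(
+  *       nnp_convolution_algorithm_auto, nnp_convolution_transform_strategy_block_based,
+  *       16, 32, input_size, pad, kernel_size, (struct nnp_size) { .width = 1, .height = 1 },
+  *       input, kernel, bias, output,
+  *       NULL, &workspace_size,           // pass 1: only the required size is written
+  *       nnp_activation_identity, NULL, threadpool, NULL);
+  *   void* workspace = aligned_alloc(64, workspace_size);
+  *   nnp_convolution_inference(
+  *       nnp_convolution_algorithm_auto, nnp_convolution_transform_strategy_block_based,
+  *       16, 32, input_size, pad, kernel_size, (struct nnp_size) { .width = 1, .height = 1 },
+  *       input, kernel, bias, output,
+  *       workspace, &workspace_size,      // pass 2: actual computation
+  *       nnp_activation_identity, NULL, threadpool, NULL);
+  *   free(workspace);
+  */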
406
+ /**
407
+ * @brief Computes output of a fully connected layer from input and kernel matrices.
408
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
409
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
410
+ * For minibatch size 1, use nnp_fully_connected_inference for optimal performance.
411
+ * @param batch_size The number of vectors on the input and output of the fully connected layer.
412
+ * @param input_channels The number of channels (AKA features, dimensions) in the input matrix.
413
+ * @param output_channels The number of channels (AKA features, dimensions) in the output matrix.
414
+ * @param[in] input A 2D matrix input[batch_size][input_channels].
415
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels].
416
+ * @param[out] output A 2D matrix output[batch_size][output_channels].
417
+ * @param threadpool A thread pool for parallelization of the computation.
418
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
419
+ */
420
+ enum nnp_status nnp_fully_connected_output(
421
+ size_t batch_size,
422
+ size_t input_channels,
423
+ size_t output_channels,
424
+ const float input[],
425
+ const float kernel[],
426
+ float output[],
427
+ pthreadpool_t threadpool,
428
+ struct nnp_profile* profile);
429
+
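+ /*
+  * Illustrative sketch (not part of the original header): the batched fully connected
+  * layer is a single matrix product, output[b][o] = sum_i input[b][i] * kernel[o][i].
+  * Sizes are hypothetical.
+  *
+  *   nnp_fully_connected_output(64, 1024, 256, input, kernel, output, threadpool, NULL);
+  */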
430
+ /**
431
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
432
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
433
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
434
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
435
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
436
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements.
437
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
438
+ * @param threadpool A thread pool for parallelization of the computation.
439
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
440
+ */
441
+ enum nnp_status nnp_fully_connected_inference(
442
+ size_t input_channels,
443
+ size_t output_channels,
444
+ const float* input,
445
+ const float* kernel,
446
+ float* output,
447
+ pthreadpool_t threadpool);
448
+
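+ /*
+  * Illustrative sketch (not part of the original header): for a single input vector
+  * (batch size 1), the inference variant avoids the batched code path.
+  *
+  *   nnp_fully_connected_inference(1024, 256, input, kernel, output, threadpool);
+  */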
449
+ /**
450
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
451
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
452
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
453
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
454
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
455
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements.
456
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
457
+ * @param threadpool A thread pool for parallelization of the computation.
458
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
459
+ */
460
+ enum nnp_status nnp_fully_connected_inference_f16f32(
461
+ size_t input_channels,
462
+ size_t output_channels,
463
+ const float* input,
464
+ const void* kernel,
465
+ float* output,
466
+ pthreadpool_t threadpool);
467
+
468
+ /**
469
+ * @brief Computes output of a max-pooling layer for an input tensor.
470
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
471
+ * propagation. It is optimized for both large and small minibatch sizes.
472
+ * @param batch_size The number of images on the input and output of the max-pooling layer.
473
+ * @param channels The number of channels (AKA features, dimensions) in both input and output images.
474
+ * @param input_size Size of input images, excluding implicit zero-padding.
475
+ * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but
476
+ * affect the output size.
477
+ * @param pooling_size Size of the pooling filter. Only 2x2 filters are currently supported.
478
+ * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported.
479
+ * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width].
480
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where
481
+ * output_size.height = ceil(
482
+ * (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) /
483
+ * pooling_stride.height) + 1
484
+ * output_size.width = ceil(
485
+ * (input_padding.left + input_size.width + input_padding.right - pooling_size.width) /
486
+ * pooling_stride.width) + 1
487
+ * @param threadpool A thread pool for parallelization of the computation.
488
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
489
+ */
490
+ enum nnp_status nnp_max_pooling_output(
491
+ size_t batch_size,
492
+ size_t channels,
493
+ struct nnp_size input_size,
494
+ struct nnp_padding input_padding,
495
+ struct nnp_size pooling_size,
496
+ struct nnp_size pooling_stride,
497
+ const float input[],
498
+ float output[],
499
+ pthreadpool_t threadpool);
500
+
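+ /*
+  * Illustrative sketch (not part of the original header). With a 2x2 filter, 2x2 stride
+  * and no padding, a 224x224 input maps to ceil((224 - 2) / 2) + 1 = 112 per dimension,
+  * following the output-size formula above. Sizes are hypothetical.
+  *
+  *   struct nnp_size pooling = { .width = 2, .height = 2 };
+  *   nnp_max_pooling_output(64, 64,
+  *       (struct nnp_size) { .width = 224, .height = 224 },
+  *       (struct nnp_padding) { 0, 0, 0, 0 },
+  *       pooling, pooling, input, output, threadpool);
+  */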
501
+ /**
502
+ * @brief Computes output of a softmax layer for an input matrix.
503
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
504
+ * propagation. It is optimized for both large and small minibatch sizes.
505
+ * @param batch_size The number of vectors on the input and output of the softmax layer.
506
+ * @param channels The number of channels (AKA features, dimensions) in both input and output vectors.
507
+ * @param[in] input A 2D matrix input[batch_size][channels].
508
+ * @param[out] output A 2D matrix output[batch_size][channels].
509
+ * @param threadpool A thread pool for parallelization of the computation.
510
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
511
+ */
512
+ enum nnp_status nnp_softmax_output(
513
+ size_t batch_size,
514
+ size_t channels,
515
+ const float input[],
516
+ float output[],
517
+ pthreadpool_t threadpool);
518
+
519
+ /**
520
+ * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix.
521
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
522
+ * propagation. It is optimized for both large and small minibatch sizes.
523
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
524
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
525
+ * @param[in] input A 2D matrix input[batch_size][channels].
526
+ * @param[out] output A 2D matrix output[batch_size][channels].
527
+ * @param threadpool A thread pool for parallelization of the computation.
528
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
529
+ */
530
+ enum nnp_status nnp_relu_output(
531
+ size_t batch_size,
532
+ size_t channels,
533
+ const float input[],
534
+ float output[],
535
+ float negative_slope,
536
+ pthreadpool_t threadpool);
537
+
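+ /*
+  * Illustrative sketch (not part of the original header): with negative_slope = 0.0f this
+  * is a standard ReLU; a non-zero slope (e.g. 0.01f) presumably scales negative inputs,
+  * i.e. a leaky ReLU. Sizes are hypothetical.
+  *
+  *   nnp_relu_output(64, 4096, input, output, 0.0f, threadpool);
+  */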
538
+ /**
539
+ * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices.
540
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
541
+ * It is optimized for both large and small minibatch sizes.
542
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
543
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
544
+ * @param[in] grad_output A 2D matrix grad_output[batch_size][channels] with gradients of the layer outputs.
545
+ * @param[in] input A 2D matrix input[batch_size][channels] with the layer inputs from the forward pass.
+ * @param[out] grad_input A 2D matrix grad_input[batch_size][channels] for gradients of the layer inputs.
546
+ * @param threadpool A thread pool for parallelization of the computation.
547
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
548
+ */
549
+ enum nnp_status nnp_relu_input_gradient(
550
+ size_t batch_size,
551
+ size_t channels,
552
+ const float grad_output[],
553
+ const float input[],
554
+ float grad_input[],
555
+ float negative_slope,
556
+ pthreadpool_t threadpool);
557
+
558
+ #ifdef __cplusplus
559
+ } /* extern "C" */
560
+ #endif
561
+
562
+ #ifdef __cplusplus
563
+ // Backward compatible implementations for nnp_convolution_*, if we are in C++
564
+ // mode.
565
+ inline enum nnp_status nnp_convolution_output(
566
+ enum nnp_convolution_algorithm algorithm,
567
+ size_t batch_size,
568
+ size_t input_channels,
569
+ size_t output_channels,
570
+ struct nnp_size input_size,
571
+ struct nnp_padding input_padding,
572
+ struct nnp_size kernel_size,
573
+ const float input[],
574
+ const float kernel[],
575
+ const float bias[],
576
+ float output[],
577
+ pthreadpool_t threadpool,
578
+ struct nnp_profile* profile)
579
+ {
580
+ return nnp_convolution_output(
581
+ algorithm,
582
+ batch_size, input_channels, output_channels,
583
+ input_size, input_padding, kernel_size,
584
+ input, kernel, bias, output,
585
+ NULL, NULL,
586
+ nnp_activation_identity, NULL, threadpool, profile);
587
+ }
588
+
589
+ inline enum nnp_status nnp_convolution_input_gradient(
590
+ enum nnp_convolution_algorithm algorithm,
591
+ size_t batch_size,
592
+ size_t input_channels,
593
+ size_t output_channels,
594
+ struct nnp_size input_size,
595
+ struct nnp_padding input_padding,
596
+ struct nnp_size kernel_size,
597
+ const float grad_output[],
598
+ const float kernel[],
599
+ float grad_input[],
600
+ pthreadpool_t threadpool,
601
+ struct nnp_profile* profile)
602
+ {
603
+ return nnp_convolution_input_gradient(
604
+ algorithm,
605
+ batch_size, input_channels, output_channels,
606
+ input_size, input_padding, kernel_size,
607
+ grad_output, kernel, grad_input,
608
+ NULL, NULL,
609
+ nnp_activation_identity, NULL, threadpool, profile);
610
+ }
611
+
612
+ inline enum nnp_status nnp_convolution_kernel_gradient(
613
+ enum nnp_convolution_algorithm algorithm,
614
+ size_t batch_size,
615
+ size_t input_channels,
616
+ size_t output_channels,
617
+ struct nnp_size input_size,
618
+ struct nnp_padding input_padding,
619
+ struct nnp_size kernel_size,
620
+ const float input[],
621
+ const float grad_output[],
622
+ float grad_kernel[],
623
+ pthreadpool_t threadpool,
624
+ struct nnp_profile* profile)
625
+ {
626
+ return nnp_convolution_kernel_gradient(
627
+ algorithm,
628
+ batch_size, input_channels, output_channels,
629
+ input_size, input_padding, kernel_size,
630
+ input, grad_output, grad_kernel,
631
+ NULL, NULL,
632
+ nnp_activation_identity, NULL, threadpool, profile);
633
+ }
634
+
635
+ inline enum nnp_status nnp_convolution_inference(
636
+ enum nnp_convolution_algorithm algorithm,
637
+ enum nnp_convolution_transform_strategy transform_strategy,
638
+ size_t input_channels,
639
+ size_t output_channels,
640
+ struct nnp_size input_size,
641
+ struct nnp_padding input_padding,
642
+ struct nnp_size kernel_size,
643
+ struct nnp_size output_subsampling,
644
+ const float input[],
645
+ const float kernel[],
646
+ const float bias[],
647
+ float output[],
648
+ pthreadpool_t threadpool,
649
+ struct nnp_profile* profile) {
650
+ return nnp_convolution_inference(
651
+ algorithm, transform_strategy,
652
+ input_channels, output_channels,
653
+ input_size, input_padding, kernel_size, output_subsampling,
654
+ input, kernel, bias, output, NULL, NULL,
655
+ nnp_activation_identity, NULL,
656
+ threadpool, profile);
657
+ }
658
+
659
+ #endif // __cplusplus
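+
+ /*
+  * Illustrative note (not part of the original header): in C++ translation units the
+  * inline overloads above forward to the full C API with an internally managed workspace
+  * and identity activation, so pre-existing callers can keep the short signature:
+  *
+  *   nnp_convolution_output(nnp_convolution_algorithm_auto,
+  *                          batch_size, input_channels, output_channels,
+  *                          input_size, input_padding, kernel_size,
+  *                          input, kernel, bias, output, threadpool, nullptr);
+  */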
llmeval-env/lib/python3.10/site-packages/torch/include/psimd.h ADDED
@@ -0,0 +1,1384 @@
1
+ #pragma once
2
+ #ifndef PSIMD_H
3
+ #define PSIMD_H
4
+
5
+ #if defined(__CUDA_ARCH__)
6
+ /* CUDA compiler */
7
+ #define PSIMD_INTRINSIC __forceinline__ __device__
8
+ #elif defined(__OPENCL_VERSION__)
9
+ /* OpenCL compiler */
10
+ #define PSIMD_INTRINSIC inline static
11
+ #elif defined(__INTEL_COMPILER)
12
+ /* Intel compiler, even on Windows */
13
+ #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__))
14
+ #elif defined(__GNUC__)
15
+ /* GCC-compatible compiler (gcc/clang/icc) */
16
+ #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__))
17
+ #elif defined(_MSC_VER)
18
+ /* MSVC-compatible compiler (cl/icl/clang-cl) */
19
+ #define PSIMD_INTRINSIC __forceinline static
20
+ #elif defined(__cplusplus)
21
+ /* Generic C++ compiler */
22
+ #define PSIMD_INTRINSIC inline static
23
+ #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
24
+ /* Generic C99 compiler */
25
+ #define PSIMD_INTRINSIC inline static
26
+ #else
27
+ /* Generic C compiler */
28
+ #define PSIMD_INTRINSIC static
29
+ #endif
30
+
31
+ #if defined(__GNUC__) || defined(__clang__)
32
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
33
+ #include <arm_neon.h>
34
+ #endif
35
+
36
+ #if defined(__SSE2__)
37
+ #include <emmintrin.h>
38
+ #endif
39
+
40
+ #if defined(__SSE3__)
41
+ #include <pmmintrin.h>
42
+ #endif
43
+
44
+ #if defined(__SSSE3__)
45
+ #include <tmmintrin.h>
46
+ #endif
47
+
48
+ #if defined(__SSE4_1__)
49
+ #include <smmintrin.h>
50
+ #endif
51
+
52
+ #if defined(__SSE4_2__)
53
+ #include <nmmintrin.h>
54
+ #endif
55
+
56
+ #if defined(__AVX__)
57
+ #include <immintrin.h>
58
+ #endif
59
+ #elif defined(_MSC_VER)
60
+ #include <intrin.h>
61
+ #endif
62
+
63
+ #if defined(__cplusplus)
64
+ #define PSIMD_CXX_SYNTAX
65
+ #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
66
+ #define PSIMD_C11_SYNTAX
67
+ #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
68
+ #define PSIMD_C99_SYNTAX
69
+ #else
70
+ #define PSIMD_C89_SYNTAX
71
+ #endif
72
+
73
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
74
+ #include <cstddef>
75
+ #include <cstdint>
76
+ #elif !defined(__OPENCL_VERSION__)
77
+ #include <stddef.h>
78
+ #include <stdint.h>
79
+ #endif
80
+
81
+ #if defined(__GNUC__) || defined(__clang__)
82
+ #define PSIMD_HAVE_F64 0
83
+ #define PSIMD_HAVE_F32 1
84
+ #define PSIMD_HAVE_U8 1
85
+ #define PSIMD_HAVE_S8 1
86
+ #define PSIMD_HAVE_U16 1
87
+ #define PSIMD_HAVE_S16 1
88
+ #define PSIMD_HAVE_U32 1
89
+ #define PSIMD_HAVE_S32 1
90
+ #define PSIMD_HAVE_U64 0
91
+ #define PSIMD_HAVE_S64 0
92
+
93
+ typedef int8_t psimd_s8 __attribute__((vector_size(16), aligned(1)));
94
+ typedef uint8_t psimd_u8 __attribute__((vector_size(16), aligned(1)));
95
+ typedef int16_t psimd_s16 __attribute__((vector_size(16), aligned(2)));
96
+ typedef uint16_t psimd_u16 __attribute__((vector_size(16), aligned(2)));
97
+ typedef int32_t psimd_s32 __attribute__((vector_size(16), aligned(4)));
98
+ typedef uint32_t psimd_u32 __attribute__((vector_size(16), aligned(4)));
99
+ typedef float psimd_f32 __attribute__((vector_size(16), aligned(4)));
100
+
101
+ typedef struct {
102
+ psimd_s8 lo;
103
+ psimd_s8 hi;
104
+ } psimd_s8x2;
105
+
106
+ typedef struct {
107
+ psimd_u8 lo;
108
+ psimd_u8 hi;
109
+ } psimd_u8x2;
110
+
111
+ typedef struct {
112
+ psimd_s16 lo;
113
+ psimd_s16 hi;
114
+ } psimd_s16x2;
115
+
116
+ typedef struct {
117
+ psimd_u16 lo;
118
+ psimd_u16 hi;
119
+ } psimd_u16x2;
120
+
121
+ typedef struct {
122
+ psimd_s32 lo;
123
+ psimd_s32 hi;
124
+ } psimd_s32x2;
125
+
126
+ typedef struct {
127
+ psimd_u32 lo;
128
+ psimd_u32 hi;
129
+ } psimd_u32x2;
130
+
131
+ typedef struct {
132
+ psimd_f32 lo;
133
+ psimd_f32 hi;
134
+ } psimd_f32x2;
135
+
136
+ /* Bit casts */
137
+ PSIMD_INTRINSIC psimd_u32x2 psimd_cast_s32x2_u32x2(psimd_s32x2 v) {
138
+ return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi };
139
+ }
140
+
141
+ PSIMD_INTRINSIC psimd_f32x2 psimd_cast_s32x2_f32x2(psimd_s32x2 v) {
142
+ return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi };
143
+ }
144
+
145
+ PSIMD_INTRINSIC psimd_s32x2 psimd_cast_u32x2_s32x2(psimd_u32x2 v) {
146
+ return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi };
147
+ }
148
+
149
+ PSIMD_INTRINSIC psimd_f32x2 psimd_cast_u32x2_f32x2(psimd_u32x2 v) {
150
+ return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi };
151
+ }
152
+
153
+ PSIMD_INTRINSIC psimd_s32x2 psimd_cast_f32x2_s32x2(psimd_f32x2 v) {
154
+ return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi };
155
+ }
156
+
157
+ PSIMD_INTRINSIC psimd_u32x2 psimd_cast_f32x2_u32x2(psimd_f32x2 v) {
158
+ return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi };
159
+ }
160
+
161
+ /* Swap */
162
+ PSIMD_INTRINSIC void psimd_swap_s8(psimd_s8 a[1], psimd_s8 b[1]) {
163
+ const psimd_s8 new_a = *b;
164
+ const psimd_s8 new_b = *a;
165
+ *a = new_a;
166
+ *b = new_b;
167
+ }
168
+
169
+ PSIMD_INTRINSIC void psimd_swap_u8(psimd_u8 a[1], psimd_u8 b[1]) {
170
+ const psimd_u8 new_a = *b;
171
+ const psimd_u8 new_b = *a;
172
+ *a = new_a;
173
+ *b = new_b;
174
+ }
175
+
176
+ PSIMD_INTRINSIC void psimd_swap_s16(psimd_s16 a[1], psimd_s16 b[1]) {
177
+ const psimd_s16 new_a = *b;
178
+ const psimd_s16 new_b = *a;
179
+ *a = new_a;
180
+ *b = new_b;
181
+ }
182
+
183
+ PSIMD_INTRINSIC void psimd_swap_u16(psimd_u16 a[1], psimd_u16 b[1]) {
184
+ const psimd_u16 new_a = *b;
185
+ const psimd_u16 new_b = *a;
186
+ *a = new_a;
187
+ *b = new_b;
188
+ }
189
+
190
+ PSIMD_INTRINSIC void psimd_swap_s32(psimd_s32 a[1], psimd_s32 b[1]) {
191
+ const psimd_s32 new_a = *b;
192
+ const psimd_s32 new_b = *a;
193
+ *a = new_a;
194
+ *b = new_b;
195
+ }
196
+
197
+ PSIMD_INTRINSIC void psimd_swap_u32(psimd_u32 a[1], psimd_u32 b[1]) {
198
+ const psimd_u32 new_a = *b;
199
+ const psimd_u32 new_b = *a;
200
+ *a = new_a;
201
+ *b = new_b;
202
+ }
203
+
204
+ PSIMD_INTRINSIC void psimd_swap_f32(psimd_f32 a[1], psimd_f32 b[1]) {
205
+ const psimd_f32 new_a = *b;
206
+ const psimd_f32 new_b = *a;
207
+ *a = new_a;
208
+ *b = new_b;
209
+ }
210
+
211
+ /* Zero-initialization */
212
+ PSIMD_INTRINSIC psimd_s8 psimd_zero_s8(void) {
213
+ return (psimd_s8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
214
+ }
215
+
216
+ PSIMD_INTRINSIC psimd_u8 psimd_zero_u8(void) {
217
+ return (psimd_u8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
218
+ }
219
+
220
+ PSIMD_INTRINSIC psimd_s16 psimd_zero_s16(void) {
221
+ return (psimd_s16) { 0, 0, 0, 0, 0, 0, 0, 0 };
222
+ }
223
+
224
+ PSIMD_INTRINSIC psimd_u16 psimd_zero_u16(void) {
225
+ return (psimd_u16) { 0, 0, 0, 0, 0, 0, 0, 0 };
226
+ }
227
+
228
+ PSIMD_INTRINSIC psimd_s32 psimd_zero_s32(void) {
229
+ return (psimd_s32) { 0, 0, 0, 0 };
230
+ }
231
+
232
+ PSIMD_INTRINSIC psimd_u32 psimd_zero_u32(void) {
233
+ return (psimd_u32) { 0, 0, 0, 0 };
234
+ }
235
+
236
+ PSIMD_INTRINSIC psimd_f32 psimd_zero_f32(void) {
237
+ return (psimd_f32) { 0.0f, 0.0f, 0.0f, 0.0f };
238
+ }
239
+
240
+ /* Initialization to the same constant */
241
+ PSIMD_INTRINSIC psimd_s8 psimd_splat_s8(int8_t c) {
242
+ return (psimd_s8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c };
243
+ }
244
+
245
+ PSIMD_INTRINSIC psimd_u8 psimd_splat_u8(uint8_t c) {
246
+ return (psimd_u8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c };
247
+ }
248
+
249
+ PSIMD_INTRINSIC psimd_s16 psimd_splat_s16(int16_t c) {
250
+ return (psimd_s16) { c, c, c, c, c, c, c, c };
251
+ }
252
+
253
+ PSIMD_INTRINSIC psimd_u16 psimd_splat_u16(uint16_t c) {
254
+ return (psimd_u16) { c, c, c, c, c, c, c, c };
255
+ }
256
+
257
+ PSIMD_INTRINSIC psimd_s32 psimd_splat_s32(int32_t c) {
258
+ return (psimd_s32) { c, c, c, c };
259
+ }
260
+
261
+ PSIMD_INTRINSIC psimd_u32 psimd_splat_u32(uint32_t c) {
262
+ return (psimd_u32) { c, c, c, c };
263
+ }
264
+
265
+ PSIMD_INTRINSIC psimd_f32 psimd_splat_f32(float c) {
266
+ return (psimd_f32) { c, c, c, c };
267
+ }
268
+
269
+ /* Load vector */
270
+ PSIMD_INTRINSIC psimd_s8 psimd_load_s8(const void* address) {
271
+ return *((const psimd_s8*) address);
272
+ }
273
+
274
+ PSIMD_INTRINSIC psimd_u8 psimd_load_u8(const void* address) {
275
+ return *((const psimd_u8*) address);
276
+ }
277
+
278
+ PSIMD_INTRINSIC psimd_s16 psimd_load_s16(const void* address) {
279
+ return *((const psimd_s16*) address);
280
+ }
281
+
282
+ PSIMD_INTRINSIC psimd_u16 psimd_load_u16(const void* address) {
283
+ return *((const psimd_u16*) address);
284
+ }
285
+
286
+ PSIMD_INTRINSIC psimd_s32 psimd_load_s32(const void* address) {
287
+ return *((const psimd_s32*) address);
288
+ }
289
+
290
+ PSIMD_INTRINSIC psimd_u32 psimd_load_u32(const void* address) {
291
+ return *((const psimd_u32*) address);
292
+ }
293
+
294
+ PSIMD_INTRINSIC psimd_f32 psimd_load_f32(const void* address) {
295
+ return *((const psimd_f32*) address);
296
+ }
297
+
298
+ PSIMD_INTRINSIC psimd_s8 psimd_load_splat_s8(const void* address) {
299
+ return psimd_splat_s8(*((const int8_t*) address));
300
+ }
301
+
302
+ PSIMD_INTRINSIC psimd_u8 psimd_load_splat_u8(const void* address) {
303
+ return psimd_splat_u8(*((const uint8_t*) address));
304
+ }
305
+
306
+ PSIMD_INTRINSIC psimd_s16 psimd_load_splat_s16(const void* address) {
307
+ return psimd_splat_s16(*((const int16_t*) address));
308
+ }
309
+
310
+ PSIMD_INTRINSIC psimd_u16 psimd_load_splat_u16(const void* address) {
311
+ return psimd_splat_u16(*((const uint16_t*) address));
312
+ }
313
+
314
+ PSIMD_INTRINSIC psimd_s32 psimd_load_splat_s32(const void* address) {
315
+ return psimd_splat_s32(*((const int32_t*) address));
316
+ }
317
+
318
+ PSIMD_INTRINSIC psimd_u32 psimd_load_splat_u32(const void* address) {
319
+ return psimd_splat_u32(*((const uint32_t*) address));
320
+ }
321
+
322
+ PSIMD_INTRINSIC psimd_f32 psimd_load_splat_f32(const void* address) {
323
+ return psimd_splat_f32(*((const float*) address));
324
+ }
325
+
326
+ PSIMD_INTRINSIC psimd_s32 psimd_load1_s32(const void* address) {
327
+ return (psimd_s32) { *((const int32_t*) address), 0, 0, 0 };
328
+ }
329
+
330
+ PSIMD_INTRINSIC psimd_u32 psimd_load1_u32(const void* address) {
331
+ return (psimd_u32) { *((const uint32_t*) address), 0, 0, 0 };
332
+ }
333
+
334
+ PSIMD_INTRINSIC psimd_f32 psimd_load1_f32(const void* address) {
335
+ return (psimd_f32) { *((const float*) address), 0.0f, 0.0f, 0.0f };
336
+ }
337
+
338
+ PSIMD_INTRINSIC psimd_s32 psimd_load2_s32(const void* address) {
339
+ const int32_t* address_s32 = (const int32_t*) address;
340
+ return (psimd_s32) { address_s32[0], address_s32[1], 0, 0 };
341
+ }
342
+
343
+ PSIMD_INTRINSIC psimd_u32 psimd_load2_u32(const void* address) {
344
+ const uint32_t* address_u32 = (const uint32_t*) address;
345
+ return (psimd_u32) { address_u32[0], address_u32[1], 0, 0 };
346
+ }
347
+
348
+ PSIMD_INTRINSIC psimd_f32 psimd_load2_f32(const void* address) {
349
+ const float* address_f32 = (const float*) address;
350
+ return (psimd_f32) { address_f32[0], address_f32[1], 0.0f, 0.0f };
351
+ }
352
+
353
+ PSIMD_INTRINSIC psimd_s32 psimd_load3_s32(const void* address) {
354
+ const int32_t* address_s32 = (const int32_t*) address;
355
+ return (psimd_s32) { address_s32[0], address_s32[1], address_s32[2], 0 };
356
+ }
357
+
358
+ PSIMD_INTRINSIC psimd_u32 psimd_load3_u32(const void* address) {
359
+ const uint32_t* address_u32 = (const uint32_t*) address;
360
+ return (psimd_u32) { address_u32[0], address_u32[1], address_u32[2], 0 };
361
+ }
362
+
363
+ PSIMD_INTRINSIC psimd_f32 psimd_load3_f32(const void* address) {
364
+ const float* address_f32 = (const float*) address;
365
+ return (psimd_f32) { address_f32[0], address_f32[1], address_f32[2], 0.0f };
366
+ }
367
+
368
+ PSIMD_INTRINSIC psimd_s32 psimd_load4_s32(const void* address) {
369
+ return psimd_load_s32(address);
370
+ }
371
+
372
+ PSIMD_INTRINSIC psimd_u32 psimd_load4_u32(const void* address) {
373
+ return psimd_load_u32(address);
374
+ }
375
+
376
+ PSIMD_INTRINSIC psimd_f32 psimd_load4_f32(const void* address) {
377
+ return psimd_load_f32(address);
378
+ }
379
+
380
+ PSIMD_INTRINSIC psimd_f32 psimd_load_stride2_f32(const void* address) {
381
+ const psimd_f32 v0x1x = psimd_load_f32(address);
382
+ const psimd_f32 vx2x3 = psimd_load_f32((const float*) address + 3);
383
+ #if defined(__clang__)
384
+ return __builtin_shufflevector(v0x1x, vx2x3, 0, 2, 5, 7);
385
+ #else
386
+ return __builtin_shuffle(v0x1x, vx2x3, (psimd_s32) { 0, 2, 5, 7 });
387
+ #endif
388
+ }
389
+
390
+ PSIMD_INTRINSIC psimd_f32 psimd_load1_stride2_f32(const void* address) {
391
+ return psimd_load_f32(address);
392
+ }
393
+
394
+ PSIMD_INTRINSIC psimd_f32 psimd_load2_stride2_f32(const void* address) {
395
+ const float* address_f32 = (const float*) address;
396
+ return (psimd_f32) { address_f32[0], address_f32[2], 0.0f, 0.0f };
397
+ }
398
+
399
+ PSIMD_INTRINSIC psimd_f32 psimd_load3_stride2_f32(const void* address) {
400
+ const psimd_f32 v0x1x = psimd_load_f32(address);
401
+ const psimd_f32 v2zzz = psimd_load1_f32((const float*) address + 2);
402
+ #if defined(__clang__)
403
+ return __builtin_shufflevector(v0x1x, v2zzz, 0, 2, 4, 6);
404
+ #else
405
+ return __builtin_shuffle(v0x1x, v2zzz, (psimd_s32) { 0, 2, 4, 6 });
406
+ #endif
407
+ }
408
+
409
+ PSIMD_INTRINSIC psimd_f32 psimd_load4_stride2_f32(const void* address) {
410
+ return psimd_load_stride2_f32(address);
411
+ }
412
+
413
+ PSIMD_INTRINSIC psimd_f32 psimd_load_stride_f32(const void* address, size_t stride) {
414
+ const float* address0_f32 = (const float*) address;
415
+ const float* address1_f32 = address0_f32 + stride;
416
+ const float* address2_f32 = address1_f32 + stride;
417
+ const float* address3_f32 = address2_f32 + stride;
418
+ return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, *address3_f32 };
419
+ }
420
+
421
+ PSIMD_INTRINSIC psimd_f32 psimd_load1_stride_f32(const void* address, size_t stride) {
422
+ return psimd_load1_f32(address);
423
+ }
424
+
425
+ PSIMD_INTRINSIC psimd_f32 psimd_load2_stride_f32(const void* address, size_t stride) {
426
+ const float* address_f32 = (const float*) address;
427
+ return (psimd_f32) { address_f32[0], address_f32[stride], 0.0f, 0.0f };
428
+ }
429
+
430
+ PSIMD_INTRINSIC psimd_f32 psimd_load3_stride_f32(const void* address, size_t stride) {
431
+ const float* address0_f32 = (const float*) address;
432
+ const float* address1_f32 = address0_f32 + stride;
433
+ const float* address2_f32 = address1_f32 + stride;
434
+ return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, 0.0f };
435
+ }
436
+
437
+ PSIMD_INTRINSIC psimd_f32 psimd_load4_stride_f32(const void* address, size_t stride) {
438
+ return psimd_load_stride_f32(address, stride);
439
+ }
440
+
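+ /*
+  * Illustrative sketch (not part of the original header): gathering one column of a
+  * row-major float matrix with at least 4 rows into a vector using the strided load
+  * above. The matrix layout and names are hypothetical.
+  *
+  *   static psimd_f32 load_column(const float* matrix, size_t n_cols, size_t col) {
+  *     // elements of a column are n_cols floats apart in row-major storage
+  *     return psimd_load_stride_f32(&matrix[col], n_cols);
+  *   }
+  */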
441
+ /* Store vector */
442
+ PSIMD_INTRINSIC void psimd_store_s8(void* address, psimd_s8 value) {
443
+ *((psimd_s8*) address) = value;
444
+ }
445
+
446
+ PSIMD_INTRINSIC void psimd_store_u8(void* address, psimd_u8 value) {
447
+ *((psimd_u8*) address) = value;
448
+ }
449
+
450
+ PSIMD_INTRINSIC void psimd_store_s16(void* address, psimd_s16 value) {
451
+ *((psimd_s16*) address) = value;
452
+ }
453
+
454
+ PSIMD_INTRINSIC void psimd_store_u16(void* address, psimd_u16 value) {
455
+ *((psimd_u16*) address) = value;
456
+ }
457
+
458
+ PSIMD_INTRINSIC void psimd_store_s32(void* address, psimd_s32 value) {
459
+ *((psimd_s32*) address) = value;
460
+ }
461
+
462
+ PSIMD_INTRINSIC void psimd_store_u32(void* address, psimd_u32 value) {
463
+ *((psimd_u32*) address) = value;
464
+ }
465
+
466
+ PSIMD_INTRINSIC void psimd_store_f32(void* address, psimd_f32 value) {
467
+ *((psimd_f32*) address) = value;
468
+ }
469
+
470
+ PSIMD_INTRINSIC void psimd_store1_s32(void* address, psimd_s32 value) {
471
+ *((int32_t*) address) = value[0];
472
+ }
473
+
474
+ PSIMD_INTRINSIC void psimd_store1_u32(void* address, psimd_u32 value) {
475
+ *((uint32_t*) address) = value[0];
476
+ }
477
+
478
+ PSIMD_INTRINSIC void psimd_store1_f32(void* address, psimd_f32 value) {
479
+ *((float*) address) = value[0];
480
+ }
481
+
482
+ PSIMD_INTRINSIC void psimd_store2_s32(void* address, psimd_s32 value) {
483
+ int32_t* address_s32 = (int32_t*) address;
484
+ address_s32[0] = value[0];
485
+ address_s32[1] = value[1];
486
+ }
487
+
488
+ PSIMD_INTRINSIC void psimd_store2_u32(void* address, psimd_u32 value) {
489
+ uint32_t* address_u32 = (uint32_t*) address;
490
+ address_u32[0] = value[0];
491
+ address_u32[1] = value[1];
492
+ }
493
+
494
+ PSIMD_INTRINSIC void psimd_store2_f32(void* address, psimd_f32 value) {
495
+ float* address_f32 = (float*) address;
496
+ address_f32[0] = value[0];
497
+ address_f32[1] = value[1];
498
+ }
499
+
500
+ PSIMD_INTRINSIC void psimd_store3_s32(void* address, psimd_s32 value) {
501
+ int32_t* address_s32 = (int32_t*) address;
502
+ address_s32[0] = value[0];
503
+ address_s32[1] = value[1];
504
+ address_s32[2] = value[2];
505
+ }
506
+
507
+ PSIMD_INTRINSIC void psimd_store3_u32(void* address, psimd_u32 value) {
508
+ uint32_t* address_u32 = (uint32_t*) address;
509
+ address_u32[0] = value[0];
510
+ address_u32[1] = value[1];
511
+ address_u32[2] = value[2];
512
+ }
513
+
514
+ PSIMD_INTRINSIC void psimd_store3_f32(void* address, psimd_f32 value) {
515
+ float* address_f32 = (float*) address;
516
+ address_f32[0] = value[0];
517
+ address_f32[1] = value[1];
518
+ address_f32[2] = value[2];
519
+ }
520
+
521
+ PSIMD_INTRINSIC void psimd_store4_s32(void* address, psimd_s32 value) {
522
+ psimd_store_s32(address, value);
523
+ }
524
+
525
+ PSIMD_INTRINSIC void psimd_store4_u32(void* address, psimd_u32 value) {
526
+ psimd_store_u32(address, value);
527
+ }
528
+
529
+ PSIMD_INTRINSIC void psimd_store4_f32(void* address, psimd_f32 value) {
530
+ psimd_store_f32(address, value);
531
+ }
532
+
533
+ PSIMD_INTRINSIC void psimd_store_stride_f32(void* address, size_t stride, psimd_f32 value) {
534
+ float* address0_f32 = (float*) address;
535
+ float* address1_f32 = address0_f32 + stride;
536
+ float* address2_f32 = address1_f32 + stride;
537
+ float* address3_f32 = address2_f32 + stride;
538
+ *address0_f32 = value[0];
539
+ *address1_f32 = value[1];
540
+ *address2_f32 = value[2];
541
+ *address3_f32 = value[3];
542
+ }
543
+
544
+ PSIMD_INTRINSIC void psimd_store1_stride_f32(void* address, size_t stride, psimd_f32 value) {
545
+ psimd_store1_f32(address, value);
546
+ }
547
+
548
+ PSIMD_INTRINSIC void psimd_store2_stride_f32(void* address, size_t stride, psimd_f32 value) {
549
+ float* address_f32 = (float*) address;
550
+ address_f32[0] = value[0];
551
+ address_f32[stride] = value[1];
552
+ }
553
+
554
+ PSIMD_INTRINSIC void psimd_store3_stride_f32(void* address, size_t stride, psimd_f32 value) {
555
+ float* address0_f32 = (float*) address;
556
+ float* address1_f32 = address0_f32 + stride;
557
+ float* address2_f32 = address1_f32 + stride;
558
+ *address0_f32 = value[0];
559
+ *address1_f32 = value[1];
560
+ *address2_f32 = value[2];
561
+ }
562
+
563
+ /* Vector addition */
564
+ PSIMD_INTRINSIC psimd_s8 psimd_add_s8(psimd_s8 a, psimd_s8 b) {
565
+ return a + b;
566
+ }
567
+
568
+ PSIMD_INTRINSIC psimd_u8 psimd_add_u8(psimd_u8 a, psimd_u8 b) {
569
+ return a + b;
570
+ }
571
+
572
+ PSIMD_INTRINSIC psimd_s16 psimd_add_s16(psimd_s16 a, psimd_s16 b) {
573
+ return a + b;
574
+ }
575
+
576
+ PSIMD_INTRINSIC psimd_u16 psimd_add_u16(psimd_u16 a, psimd_u16 b) {
577
+ return a + b;
578
+ }
579
+
580
+ PSIMD_INTRINSIC psimd_s32 psimd_add_s32(psimd_s32 a, psimd_s32 b) {
581
+ return a + b;
582
+ }
583
+
584
+ PSIMD_INTRINSIC psimd_u32 psimd_add_u32(psimd_u32 a, psimd_u32 b) {
585
+ return a + b;
586
+ }
587
+
588
+ PSIMD_INTRINSIC psimd_f32 psimd_add_f32(psimd_f32 a, psimd_f32 b) {
589
+ #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
590
+ return (psimd_f32) vaddq_f32((float32x4_t) a, (float32x4_t) b);
591
+ #else
592
+ return a + b;
593
+ #endif
594
+ }
595
+
596
+ /* Vector subtraction */
597
+ PSIMD_INTRINSIC psimd_s8 psimd_sub_s8(psimd_s8 a, psimd_s8 b) {
598
+ return a - b;
599
+ }
600
+
601
+ PSIMD_INTRINSIC psimd_u8 psimd_sub_u8(psimd_u8 a, psimd_u8 b) {
602
+ return a - b;
603
+ }
604
+
605
+ PSIMD_INTRINSIC psimd_s16 psimd_sub_s16(psimd_s16 a, psimd_s16 b) {
606
+ return a - b;
607
+ }
608
+
609
+ PSIMD_INTRINSIC psimd_u16 psimd_sub_u16(psimd_u16 a, psimd_u16 b) {
610
+ return a - b;
611
+ }
612
+
613
+ PSIMD_INTRINSIC psimd_s32 psimd_sub_s32(psimd_s32 a, psimd_s32 b) {
614
+ return a - b;
615
+ }
616
+
617
+ PSIMD_INTRINSIC psimd_u32 psimd_sub_u32(psimd_u32 a, psimd_u32 b) {
618
+ return a - b;
619
+ }
620
+
621
+ PSIMD_INTRINSIC psimd_f32 psimd_sub_f32(psimd_f32 a, psimd_f32 b) {
622
+ #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
623
+ return (psimd_f32) vsubq_f32((float32x4_t) a, (float32x4_t) b);
624
+ #else
625
+ return a - b;
626
+ #endif
627
+ }
628
+
629
+ /* Vector multiplication */
630
+ PSIMD_INTRINSIC psimd_s8 psimd_mul_s8(psimd_s8 a, psimd_s8 b) {
631
+ return a * b;
632
+ }
633
+
634
+ PSIMD_INTRINSIC psimd_u8 psimd_mul_u8(psimd_u8 a, psimd_u8 b) {
635
+ return a * b;
636
+ }
637
+
638
+ PSIMD_INTRINSIC psimd_s16 psimd_mul_s16(psimd_s16 a, psimd_s16 b) {
639
+ return a * b;
640
+ }
641
+
642
+ PSIMD_INTRINSIC psimd_u16 psimd_mul_u16(psimd_u16 a, psimd_u16 b) {
643
+ return a * b;
644
+ }
645
+
646
+ PSIMD_INTRINSIC psimd_s32 psimd_mul_s32(psimd_s32 a, psimd_s32 b) {
647
+ return a * b;
648
+ }
649
+
650
+ PSIMD_INTRINSIC psimd_u32 psimd_mul_u32(psimd_u32 a, psimd_u32 b) {
651
+ return a * b;
652
+ }
653
+
654
+ PSIMD_INTRINSIC psimd_f32 psimd_mul_f32(psimd_f32 a, psimd_f32 b) {
655
+ #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
656
+ return (psimd_f32) vmulq_f32((float32x4_t) a, (float32x4_t) b);
657
+ #else
658
+ return a * b;
659
+ #endif
660
+ }
661
+
662
+ /* Quasi-Fused Multiply-Add */
663
+ PSIMD_INTRINSIC psimd_f32 psimd_qfma_f32(psimd_f32 a, psimd_f32 b, psimd_f32 c) {
664
+ #if defined(__aarch64__) || defined(__ARM_NEON__) && defined(__ARM_FEATURE_FMA)
665
+ return (psimd_f32) vfmaq_f32((float32x4_t) a, (float32x4_t) b, (float32x4_t) c);
666
+ #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA__)
667
+ return (psimd_f32) _mm_fmadd_ps((__m128) b, (__m128) c, (__m128) a);
668
+ #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA4__)
669
+ return (psimd_f32) _mm_macc_ps((__m128) b, (__m128) c, (__m128) a);
670
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) && PSIMD_ENABLE_WASM_QFMA
671
+ return (psimd_f32) __builtin_wasm_qfma_f32x4(a, b, c);
672
+ #else
673
+ return a + b * c;
674
+ #endif
675
+ }
676
+
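+ /*
+  * Illustrative sketch (not part of the original header): an axpy-style update
+  * (y := y + a * x) over buffers whose length is a multiple of 4, built from the
+  * load/splat/qfma/store primitives in this header. Function and variable names
+  * are hypothetical.
+  *
+  *   static void saxpy_psimd(size_t n, float a, const float* x, float* y) {
+  *     const psimd_f32 va = psimd_splat_f32(a);
+  *     for (size_t i = 0; i < n; i += 4) {
+  *       // psimd_qfma_f32(acc, b, c) computes acc + b * c
+  *       const psimd_f32 vy = psimd_qfma_f32(psimd_load_f32(y + i), va, psimd_load_f32(x + i));
+  *       psimd_store_f32(y + i, vy);
+  *     }
+  *   }
+  */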
677
+ PSIMD_INTRINSIC psimd_f32 psimd_div_f32(psimd_f32 a, psimd_f32 b) {
678
+ return a / b;
679
+ }
680
+
681
+ /* Vector and */
682
+ PSIMD_INTRINSIC psimd_f32 psimd_andmask_f32(psimd_s32 mask, psimd_f32 v) {
683
+ return (psimd_f32) (mask & (psimd_s32) v);
684
+ }
685
+
686
+ /* Vector and-not */
687
+ PSIMD_INTRINSIC psimd_f32 psimd_andnotmask_f32(psimd_s32 mask, psimd_f32 v) {
688
+ return (psimd_f32) (~mask & (psimd_s32) v);
689
+ }
690
+
691
+ /* Vector blend */
692
+ PSIMD_INTRINSIC psimd_s8 psimd_blend_s8(psimd_s8 mask, psimd_s8 a, psimd_s8 b) {
693
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
694
+ return (psimd_s8) vbslq_s8((uint8x16_t) mask, (int8x16_t) a, (int8x16_t) b);
695
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
696
+ return (psimd_s8) __builtin_wasm_bitselect(a, b, mask);
697
+ #else
698
+ return (mask & a) | (~mask & b);
699
+ #endif
700
+ }
701
+
702
+ PSIMD_INTRINSIC psimd_u8 psimd_blend_u8(psimd_s8 mask, psimd_u8 a, psimd_u8 b) {
703
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
704
+ return (psimd_u8) vbslq_u8((uint8x16_t) mask, (uint8x16_t) a, (uint8x16_t) b);
705
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
706
+ return (psimd_u8) __builtin_wasm_bitselect(a, b, mask);
707
+ #else
708
+ return (psimd_u8) ((mask & (psimd_s8) a) | (~mask & (psimd_s8) b));
709
+ #endif
710
+ }
711
+
712
+ PSIMD_INTRINSIC psimd_s16 psimd_blend_s16(psimd_s16 mask, psimd_s16 a, psimd_s16 b) {
713
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
714
+ return (psimd_s16) vbslq_s16((uint16x8_t) mask, (int16x8_t) a, (int16x8_t) b);
715
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
716
+ return (psimd_s16) __builtin_wasm_bitselect(a, b, mask);
717
+ #else
718
+ return (mask & a) | (~mask & b);
719
+ #endif
720
+ }
721
+
722
+ PSIMD_INTRINSIC psimd_u16 psimd_blend_u16(psimd_s16 mask, psimd_u16 a, psimd_u16 b) {
723
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
724
+ return (psimd_u16) vbslq_u16((uint16x8_t) mask, (uint16x8_t) a, (uint16x8_t) b);
725
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
726
+ return (psimd_u16) __builtin_wasm_bitselect(a, b, mask);
727
+ #else
728
+ return (psimd_u16) ((mask & (psimd_s16) a) | (~mask & (psimd_s16) b));
729
+ #endif
730
+ }
731
+
732
+ PSIMD_INTRINSIC psimd_s32 psimd_blend_s32(psimd_s32 mask, psimd_s32 a, psimd_s32 b) {
733
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
734
+ return (psimd_s32) vbslq_s32((uint32x4_t) mask, (int32x4_t) a, (int32x4_t) b);
735
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
736
+ return (psimd_s32) __builtin_wasm_bitselect(a, b, mask);
737
+ #else
738
+ return (mask & a) | (~mask & b);
739
+ #endif
740
+ }
741
+
742
+ PSIMD_INTRINSIC psimd_u32 psimd_blend_u32(psimd_s32 mask, psimd_u32 a, psimd_u32 b) {
743
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
744
+ return (psimd_u32) vbslq_u32((uint32x4_t) mask, (uint32x4_t) a, (uint32x4_t) b);
745
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
746
+ return (psimd_u32) __builtin_wasm_bitselect(a, b, mask);
747
+ #else
748
+ return (psimd_u32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b));
749
+ #endif
750
+ }
751
+
752
+ PSIMD_INTRINSIC psimd_f32 psimd_blend_f32(psimd_s32 mask, psimd_f32 a, psimd_f32 b) {
753
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
754
+ return (psimd_f32) vbslq_f32((uint32x4_t) mask, (float32x4_t) a, (float32x4_t) b);
755
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
756
+ return (psimd_f32) __builtin_wasm_bitselect(a, b, mask);
757
+ #else
758
+ return (psimd_f32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b));
759
+ #endif
760
+ }
761
+
762
+ /* Vector blend on sign */
763
+ PSIMD_INTRINSIC psimd_s8 psimd_signblend_s8(psimd_s8 x, psimd_s8 a, psimd_s8 b) {
764
+ return psimd_blend_s8(x >> psimd_splat_s8(7), a, b);
765
+ }
766
+
767
+ PSIMD_INTRINSIC psimd_u8 psimd_signblend_u8(psimd_s8 x, psimd_u8 a, psimd_u8 b) {
768
+ return psimd_blend_u8((x >> psimd_splat_s8(7)), a, b);
769
+ }
770
+
771
+ PSIMD_INTRINSIC psimd_s16 psimd_signblend_s16(psimd_s16 x, psimd_s16 a, psimd_s16 b) {
772
+ return psimd_blend_s16(x >> psimd_splat_s16(15), a, b);
773
+ }
774
+
775
+ PSIMD_INTRINSIC psimd_u16 psimd_signblend_u16(psimd_s16 x, psimd_u16 a, psimd_u16 b) {
776
+ return psimd_blend_u16((x >> psimd_splat_s16(15)), a, b);
777
+ }
778
+
779
+ PSIMD_INTRINSIC psimd_s32 psimd_signblend_s32(psimd_s32 x, psimd_s32 a, psimd_s32 b) {
780
+ return psimd_blend_s32(x >> psimd_splat_s32(31), a, b);
781
+ }
782
+
783
+ PSIMD_INTRINSIC psimd_u32 psimd_signblend_u32(psimd_s32 x, psimd_u32 a, psimd_u32 b) {
784
+ return psimd_blend_u32((x >> psimd_splat_s32(31)), a, b);
785
+ }
786
+
787
+ PSIMD_INTRINSIC psimd_f32 psimd_signblend_f32(psimd_f32 x, psimd_f32 a, psimd_f32 b) {
788
+ const psimd_s32 mask = (psimd_s32) x >> psimd_splat_s32(31);
789
+ return psimd_blend_f32(mask, a, b);
790
+ }
791
+
792
+ /* Vector absolute value */
793
+ PSIMD_INTRINSIC psimd_f32 psimd_abs_f32(psimd_f32 v) {
794
+ const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f);
795
+ return (psimd_f32) ((psimd_s32) v & ~mask);
796
+ }
797
+
798
+ /* Vector negation */
799
+ PSIMD_INTRINSIC psimd_f32 psimd_neg_f32(psimd_f32 v) {
800
+ const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f);
801
+ return (psimd_f32) ((psimd_s32) v ^ mask);
802
+ }
803
+
804
+ /* Vector maximum */
805
+ PSIMD_INTRINSIC psimd_s8 psimd_max_s8(psimd_s8 a, psimd_s8 b) {
806
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
807
+ return (psimd_s8) vmaxq_s8((int8x16_t) a, (int8x16_t) b);
808
+ #else
809
+ return psimd_blend_s8(a > b, a, b);
810
+ #endif
811
+ }
812
+
813
+ PSIMD_INTRINSIC psimd_u8 psimd_max_u8(psimd_u8 a, psimd_u8 b) {
814
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
815
+ return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b);
816
+ #else
817
+ return psimd_blend_u8(a > b, a, b);
818
+ #endif
819
+ }
820
+
821
+ PSIMD_INTRINSIC psimd_s16 psimd_max_s16(psimd_s16 a, psimd_s16 b) {
822
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
823
+ return (psimd_s16) vmaxq_s16((int16x8_t) a, (int16x8_t) b);
824
+ #else
825
+ return psimd_blend_s16(a > b, a, b);
826
+ #endif
827
+ }
828
+
829
+ PSIMD_INTRINSIC psimd_u16 psimd_max_u16(psimd_u16 a, psimd_u16 b) {
830
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
831
+ return (psimd_u16) vmaxq_u16((uint16x8_t) a, (uint16x8_t) b);
832
+ #else
833
+ return psimd_blend_u16(a > b, a, b);
834
+ #endif
835
+ }
836
+
837
+ PSIMD_INTRINSIC psimd_s32 psimd_max_s32(psimd_s32 a, psimd_s32 b) {
838
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
839
+ return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b);
840
+ #else
841
+ return psimd_blend_s32(a > b, a, b);
842
+ #endif
843
+ }
844
+
845
+ PSIMD_INTRINSIC psimd_u32 psimd_max_u32(psimd_u32 a, psimd_u32 b) {
846
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
847
+ return (psimd_u32) vmaxq_u32((uint32x4_t) a, (uint32x4_t) b);
848
+ #else
849
+ return psimd_blend_u32(a > b, a, b);
850
+ #endif
851
+ }
852
+
853
+ PSIMD_INTRINSIC psimd_f32 psimd_max_f32(psimd_f32 a, psimd_f32 b) {
854
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
855
+ return (psimd_f32) vmaxq_f32((float32x4_t) a, (float32x4_t) b);
856
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
857
+ return __builtin_wasm_max_f32x4(a, b);
858
+ #else
859
+ return psimd_blend_f32(a > b, a, b);
860
+ #endif
861
+ }
862
+
863
+ /* Vector minimum */
864
+ PSIMD_INTRINSIC psimd_s8 psimd_min_s8(psimd_s8 a, psimd_s8 b) {
865
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
866
+ return (psimd_s8) vminq_s8((int8x16_t) a, (int8x16_t) b);
867
+ #else
868
+ return psimd_blend_s8(a < b, a, b);
869
+ #endif
870
+ }
871
+
872
+ PSIMD_INTRINSIC psimd_u8 psimd_min_u8(psimd_u8 a, psimd_u8 b) {
873
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
874
+ return (psimd_u8) vminq_u8((uint8x16_t) a, (uint8x16_t) b);
875
+ #else
876
+ return psimd_blend_u8(a < b, a, b);
877
+ #endif
878
+ }
879
+
880
+ PSIMD_INTRINSIC psimd_s16 psimd_min_s16(psimd_s16 a, psimd_s16 b) {
881
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
882
+ return (psimd_s16) vminq_s16((int16x8_t) a, (int16x8_t) b);
883
+ #else
884
+ return psimd_blend_s16(a < b, a, b);
885
+ #endif
886
+ }
887
+
888
+ PSIMD_INTRINSIC psimd_u16 psimd_min_u16(psimd_u16 a, psimd_u16 b) {
889
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
890
+ return (psimd_u16) vminq_u16((uint16x8_t) a, (uint16x8_t) b);
891
+ #else
892
+ return psimd_blend_u16(a < b, a, b);
893
+ #endif
894
+ }
895
+
896
+ PSIMD_INTRINSIC psimd_s32 psimd_min_s32(psimd_s32 a, psimd_s32 b) {
897
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
898
+ return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b);
899
+ #else
900
+ return psimd_blend_s32(a < b, a, b);
901
+ #endif
902
+ }
903
+
904
+ PSIMD_INTRINSIC psimd_u32 psimd_min_u32(psimd_u32 a, psimd_u32 b) {
905
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
906
+ return (psimd_u32) vminq_u32((uint32x4_t) a, (uint32x4_t) b);
907
+ #else
908
+ return psimd_blend_u32(a < b, a, b);
909
+ #endif
910
+ }
911
+
912
+ PSIMD_INTRINSIC psimd_f32 psimd_min_f32(psimd_f32 a, psimd_f32 b) {
913
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
914
+ return (psimd_f32) vminq_f32((float32x4_t) a, (float32x4_t) b);
915
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
916
+ return __builtin_wasm_min_f32x4(a, b);
917
+ #else
918
+ return psimd_blend_f32(a < b, a, b);
919
+ #endif
920
+ }
921
+
922
+ PSIMD_INTRINSIC psimd_f32 psimd_cvt_s32_f32(psimd_s32 v) {
923
+ #if defined(__clang__)
924
+ return __builtin_convertvector(v, psimd_f32);
925
+ #elif defined(__ARM_NEON__) || defined(__ARM_NEON)
926
+ return (psimd_f32) vcvtq_f32_s32((int32x4_t) v);
927
+ #elif defined(__SSE2__)
928
+ return (psimd_f32) _mm_cvtepi32_ps((__m128i) v);
929
+ #else
930
+ return (psimd_f32) { (float) v[0], (float) v[1], (float) v[2], (float) v[3] };
931
+ #endif
932
+ }
933
+
934
+ /* Broadcast vector element */
935
+ #if defined(__clang__)
936
+ PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) {
937
+ return __builtin_shufflevector(v, v, 0, 0, 0, 0);
938
+ }
939
+
940
+ PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) {
941
+ return __builtin_shufflevector(v, v, 1, 1, 1, 1);
942
+ }
943
+
944
+ PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) {
945
+ return __builtin_shufflevector(v, v, 2, 2, 2, 2);
946
+ }
947
+
948
+ PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) {
949
+ return __builtin_shufflevector(v, v, 3, 3, 3, 3);
950
+ }
951
+ #else
952
+ PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) {
953
+ return __builtin_shuffle(v, (psimd_s32) { 0, 0, 0, 0 });
954
+ }
955
+
956
+ PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) {
957
+ return __builtin_shuffle(v, (psimd_s32) { 1, 1, 1, 1 });
958
+ }
959
+
960
+ PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) {
961
+ return __builtin_shuffle(v, (psimd_s32) { 2, 2, 2, 2 });
962
+ }
963
+
964
+ PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) {
965
+ return __builtin_shuffle(v, (psimd_s32) { 3, 3, 3, 3 });
966
+ }
967
+ #endif
968
+
969
+ /* Reversal of vector elements */
970
+ #if defined(__clang__)
971
+ PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) {
972
+ return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
973
+ }
974
+
975
+ PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) {
976
+ return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
977
+ }
978
+
979
+ PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) {
980
+ return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
981
+ }
982
+
983
+ PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) {
984
+ return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
985
+ }
986
+
987
+ PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) {
988
+ return __builtin_shufflevector(v, v, 3, 2, 1, 0);
989
+ }
990
+
991
+ PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) {
992
+ return __builtin_shufflevector(v, v, 3, 2, 1, 0);
993
+ }
994
+
995
+ PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) {
996
+ return __builtin_shufflevector(v, v, 3, 2, 1, 0);
997
+ }
998
+ #else
999
+ PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) {
1000
+ return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 });
1001
+ }
1002
+
1003
+ PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) {
1004
+ return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 });
1005
+ }
1006
+
1007
+ PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) {
1008
+ return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 });
1009
+ }
1010
+
1011
+ PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) {
1012
+ return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 });
1013
+ }
1014
+
1015
+ PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) {
1016
+ return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
1017
+ }
1018
+
1019
+ PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) {
1020
+ return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
1021
+ }
1022
+
1023
+ PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) {
1024
+ return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
1025
+ }
1026
+ #endif
1027
+
1028
+ /* Interleaving of vector elements */
1029
+ #if defined(__clang__)
1030
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) {
1031
+ return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
1032
+ }
1033
+
1034
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) {
1035
+ return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
1036
+ }
1037
+
1038
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) {
1039
+ return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
1040
+ }
1041
+
1042
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) {
1043
+ return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
1044
+ }
1045
+
1046
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) {
1047
+ return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
1048
+ }
1049
+
1050
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) {
1051
+ return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
1052
+ }
1053
+
1054
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) {
1055
+ return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
1056
+ }
1057
+
1058
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) {
1059
+ return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
1060
+ }
1061
+
1062
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) {
1063
+ return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
1064
+ }
1065
+
1066
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) {
1067
+ return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
1068
+ }
1069
+ #else
1070
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) {
1071
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 });
1072
+ }
1073
+
1074
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) {
1075
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 });
1076
+ }
1077
+
1078
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) {
1079
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 });
1080
+ }
1081
+
1082
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) {
1083
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 });
1084
+ }
1085
+
1086
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) {
1087
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
1088
+ }
1089
+
1090
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) {
1091
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
1092
+ }
1093
+
1094
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) {
1095
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
1096
+ }
1097
+
1098
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) {
1099
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
1100
+ }
1101
+
1102
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) {
1103
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
1104
+ }
1105
+
1106
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) {
1107
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
1108
+ }
1109
+ #endif
1110
+
1111
+ /* Concatenation of low/high vector elements */
1112
+ #if defined(__clang__)
1113
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) {
1114
+ return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3);
1115
+ }
1116
+
1117
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) {
1118
+ return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7);
1119
+ }
1120
+
1121
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) {
1122
+ return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3);
1123
+ }
1124
+
1125
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) {
1126
+ return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7);
1127
+ }
1128
+
1129
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) {
1130
+ return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
1131
+ }
1132
+
1133
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) {
1134
+ return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
1135
+ }
1136
+
1137
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) {
1138
+ return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
1139
+ }
1140
+
1141
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) {
1142
+ return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
1143
+ }
1144
+
1145
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) {
1146
+ return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
1147
+ }
1148
+
1149
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) {
1150
+ return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
1151
+ }
1152
+ #else
1153
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) {
1154
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 });
1155
+ }
1156
+
1157
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) {
1158
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 });
1159
+ }
1160
+
1161
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) {
1162
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 });
1163
+ }
1164
+
1165
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) {
1166
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 });
1167
+ }
1168
+
1169
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) {
1170
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
1171
+ }
1172
+
1173
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) {
1174
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
1175
+ }
1176
+
1177
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) {
1178
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
1179
+ }
1180
+
1181
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) {
1182
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
1183
+ }
1184
+
1185
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) {
1186
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
1187
+ }
1188
+
1189
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) {
1190
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
1191
+ }
1192
+ #endif
1193
+
1194
+ /* Concatenation of even/odd vector elements */
1195
+ #if defined(__clang__)
1196
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) {
1197
+ return __builtin_shufflevector(a, b,
1198
+ 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14);
1199
+ }
1200
+
1201
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) {
1202
+ return __builtin_shufflevector(a, b,
1203
+ 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15);
1204
+ }
1205
+
1206
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) {
1207
+ return __builtin_shufflevector(a, b,
1208
+ 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14);
1209
+ }
1210
+
1211
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) {
1212
+ return __builtin_shufflevector(a, b,
1213
+ 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15);
1214
+ }
1215
+
1216
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) {
1217
+ return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6);
1218
+ }
1219
+
1220
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) {
1221
+ return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7);
1222
+ }
1223
+
1224
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) {
1225
+ return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6);
1226
+ }
1227
+
1228
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) {
1229
+ return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7);
1230
+ }
1231
+
1232
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) {
1233
+ return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
1234
+ }
1235
+
1236
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) {
1237
+ return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
1238
+ }
1239
+
1240
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) {
1241
+ return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
1242
+ }
1243
+
1244
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) {
1245
+ return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
1246
+ }
1247
+
1248
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) {
1249
+ return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
1250
+ }
1251
+
1252
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) {
1253
+ return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
1254
+ }
1255
+ #else
1256
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) {
1257
+ return __builtin_shuffle(a, b,
1258
+ (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 });
1259
+ }
1260
+
1261
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) {
1262
+ return __builtin_shuffle(a, b,
1263
+ (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 });
1264
+ }
1265
+
1266
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) {
1267
+ return __builtin_shuffle(a, b,
1268
+ (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 });
1269
+ }
1270
+
1271
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) {
1272
+ return __builtin_shuffle(a, b,
1273
+ (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 });
1274
+ }
1275
+
1276
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) {
1277
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 });
1278
+ }
1279
+
1280
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) {
1281
+ return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 });
1282
+ }
1283
+
1284
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) {
1285
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 });
1286
+ }
1287
+
1288
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) {
1289
+ return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 });
1290
+ }
1291
+
1292
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) {
1293
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
1294
+ }
1295
+
1296
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) {
1297
+ return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
1298
+ }
1299
+
1300
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) {
1301
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
1302
+ }
1303
+
1304
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) {
1305
+ return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
1306
+ }
1307
+
1308
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) {
1309
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
1310
+ }
1311
+
1312
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) {
1313
+ return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
1314
+ }
1315
+ #endif
1316
+
1317
+ /* Vector reduce */
1318
+ #if defined(__clang__)
1319
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) {
1320
+ const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, 0, 1);
1321
+ return temp + __builtin_shufflevector(temp, temp, 1, 0, 3, 2);
1322
+ }
1323
+
1324
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) {
1325
+ const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1));
1326
+ return psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2));
1327
+ }
1328
+
1329
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) {
1330
+ const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1));
1331
+ return psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2));
1332
+ }
1333
+
1334
+ PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) {
1335
+ const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, -1, -1);
1336
+ const psimd_f32 result = temp + __builtin_shufflevector(temp, temp, 1, -1, -1, -1);
1337
+ return result[0];
1338
+ }
1339
+
1340
+ PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) {
1341
+ const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1));
1342
+ const psimd_f32 result = psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1));
1343
+ return result[0];
1344
+ }
1345
+
1346
+ PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) {
1347
+ const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1));
1348
+ const psimd_f32 result = psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1));
1349
+ return result[0];
1350
+ }
1351
+ #else
1352
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) {
1353
+ const psimd_f32 temp = v + __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 });
1354
+ return temp + __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 });
1355
+ }
1356
+
1357
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) {
1358
+ const psimd_f32 temp = psimd_max_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }));
1359
+ return psimd_max_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }));
1360
+ }
1361
+
1362
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) {
1363
+ const psimd_f32 temp = psimd_min_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }));
1364
+ return psimd_min_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }));
1365
+ }
1366
+
1367
+ PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) {
1368
+ const psimd_f32 result = psimd_allreduce_sum_f32(v);
1369
+ return result[0];
1370
+ }
1371
+
1372
+ PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) {
1373
+ const psimd_f32 result = psimd_allreduce_max_f32(v);
1374
+ return result[0];
1375
+ }
1376
+
1377
+ PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) {
1378
+ const psimd_f32 result = psimd_allreduce_min_f32(v);
1379
+ return result[0];
1380
+ }
1381
+ #endif
1382
+ #endif
1383
+
1384
+ #endif /* PSIMD_H */
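
A minimal usage sketch (not part of the header above): exercising the element-wise maximum and the horizontal reductions declared in psimd.h. The literal lane values and compile setup are assumptions for illustration only.

#include <stdio.h>
#include <psimd.h>

int main(void) {
  /* psimd_f32 is a 4-lane float vector type; brace-init fills the lanes. */
  const psimd_f32 a = { 1.0f, -2.0f, 3.5f, 0.25f };
  const psimd_f32 b = { 0.5f,  4.0f, 3.0f, 8.0f  };

  /* Element-wise maximum, then a horizontal reduction over all four lanes. */
  const psimd_f32 m = psimd_max_f32(a, b);           /* { 1.0, 4.0, 3.5, 8.0 } */
  const float max_of_all = psimd_reduce_max_f32(m);  /* 8.0 */

  /* Horizontal sum of a single vector. */
  const float sum_a = psimd_reduce_sum_f32(a);       /* 1.0 - 2.0 + 3.5 + 0.25 = 2.75 */

  printf("max = %f, sum = %f\n", (double) max_of_all, (double) sum_a);
  return 0;
}
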
llmeval-env/lib/python3.10/site-packages/torch/include/pthreadpool.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack.h ADDED
@@ -0,0 +1,336 @@
1
+ /*
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include <stdbool.h>
12
+ #include <stddef.h>
13
+ #include <stdint.h>
14
+
15
+ #include <pthreadpool.h>
16
+
17
+ #ifdef __cplusplus
18
+ extern "C" {
19
+ #endif
20
+
21
+ /**
22
+ * @brief Status code for any QNNPACK function call.
23
+ */
24
+ enum qnnp_status {
25
+ /** The call succeeded, and all output arguments now contain valid data. */
26
+ qnnp_status_success = 0,
27
+ qnnp_status_uninitialized = 1,
28
+ qnnp_status_invalid_parameter = 2,
29
+ qnnp_status_unsupported_parameter = 3,
30
+ qnnp_status_unsupported_hardware = 4,
31
+ qnnp_status_out_of_memory = 5,
32
+ };
33
+
34
+ enum qnnp_status qnnp_initialize(void);
35
+
36
+ enum qnnp_status qnnp_deinitialize(void);
37
+
38
+ typedef struct qnnp_operator* qnnp_operator_t;
39
+
40
+ enum qnnp_status qnnp_create_convolution2d_nhwc_q8(
41
+ uint32_t input_padding_top,
42
+ uint32_t input_padding_right,
43
+ uint32_t input_padding_bottom,
44
+ uint32_t input_padding_left,
45
+ uint32_t kernel_height,
46
+ uint32_t kernel_width,
47
+ uint32_t subsampling_height,
48
+ uint32_t subsampling_width,
49
+ uint32_t dilation_height,
50
+ uint32_t dilation_width,
51
+ uint32_t groups,
52
+ size_t group_input_channels,
53
+ size_t group_output_channels,
54
+ uint8_t input_zero_point,
55
+ float input_scale,
56
+ uint8_t kernel_zero_point,
57
+ float kernel_scale,
58
+ const uint8_t* kernel,
59
+ const int32_t* bias,
60
+ uint8_t output_zero_point,
61
+ float output_scale,
62
+ uint8_t output_min,
63
+ uint8_t output_max,
64
+ uint32_t flags,
65
+ qnnp_operator_t* convolution);
66
+
67
+ enum qnnp_status qnnp_setup_convolution2d_nhwc_q8(
68
+ qnnp_operator_t convolution,
69
+ size_t batch_size,
70
+ size_t input_height,
71
+ size_t input_width,
72
+ const uint8_t* input,
73
+ size_t input_stride,
74
+ uint8_t* output,
75
+ size_t output_stride,
76
+ pthreadpool_t threadpool);
77
+
78
+ enum qnnp_status qnnp_create_deconvolution2d_nhwc_q8(
79
+ uint32_t input_padding_top,
80
+ uint32_t input_padding_right,
81
+ uint32_t input_padding_bottom,
82
+ uint32_t input_padding_left,
83
+ uint32_t adjustment_height,
84
+ uint32_t adjustment_width,
85
+ uint32_t kernel_height,
86
+ uint32_t kernel_width,
87
+ uint32_t stride_height,
88
+ uint32_t stride_width,
89
+ uint32_t dilation_height,
90
+ uint32_t dilation_width,
91
+ uint32_t groups,
92
+ size_t group_input_channels,
93
+ size_t group_output_channels,
94
+ uint8_t input_zero_point,
95
+ float input_scale,
96
+ uint8_t kernel_zero_point,
97
+ float kernel_scale,
98
+ const uint8_t* kernel,
99
+ const int32_t* bias,
100
+ uint8_t output_zero_point,
101
+ float output_scale,
102
+ uint8_t output_min,
103
+ uint8_t output_max,
104
+ uint32_t flags,
105
+ qnnp_operator_t* deconvolution);
106
+
107
+ enum qnnp_status qnnp_setup_deconvolution2d_nhwc_q8(
108
+ qnnp_operator_t deconvolution,
109
+ size_t batch_size,
110
+ size_t input_height,
111
+ size_t input_width,
112
+ const uint8_t* input,
113
+ size_t input_stride,
114
+ uint8_t* output,
115
+ size_t output_stride,
116
+ pthreadpool_t threadpool);
117
+
118
+ enum qnnp_status qnnp_create_fully_connected_nc_q8(
119
+ size_t input_channels,
120
+ size_t output_channels,
121
+ uint8_t input_zero_point,
122
+ float input_scale,
123
+ uint8_t kernel_zero_point,
124
+ float kernel_scale,
125
+ const uint8_t* kernel,
126
+ const int32_t* bias,
127
+ uint8_t output_zero_point,
128
+ float output_scale,
129
+ uint8_t output_min,
130
+ uint8_t output_max,
131
+ uint32_t flags,
132
+ qnnp_operator_t* fully_connected);
133
+
134
+ enum qnnp_status qnnp_setup_fully_connected_nc_q8(
135
+ qnnp_operator_t fully_connected,
136
+ size_t batch_size,
137
+ const uint8_t* input,
138
+ size_t input_stride,
139
+ uint8_t* output,
140
+ size_t output_stride);
141
+
142
+ enum qnnp_status qnnp_create_global_average_pooling_nwc_q8(
143
+ size_t channels,
144
+ uint8_t input_zero_point,
145
+ float input_scale,
146
+ uint8_t output_zero_point,
147
+ float output_scale,
148
+ uint8_t output_min,
149
+ uint8_t output_max,
150
+ uint32_t flags,
151
+ qnnp_operator_t* global_average_pooling);
152
+
153
+ enum qnnp_status qnnp_setup_global_average_pooling_nwc_q8(
154
+ qnnp_operator_t global_average_pooling,
155
+ size_t batch_size,
156
+ size_t width,
157
+ const uint8_t* input,
158
+ size_t input_stride,
159
+ uint8_t* output,
160
+ size_t output_stride);
161
+
162
+ enum qnnp_status qnnp_create_average_pooling2d_nhwc_q8(
163
+ uint32_t input_padding_top,
164
+ uint32_t input_padding_right,
165
+ uint32_t input_padding_bottom,
166
+ uint32_t input_padding_left,
167
+ uint32_t pooling_height,
168
+ uint32_t pooling_width,
169
+ uint32_t stride_height,
170
+ uint32_t stride_width,
171
+ size_t channels,
172
+ uint8_t input_zero_point,
173
+ float input_scale,
174
+ uint8_t output_zero_point,
175
+ float output_scale,
176
+ uint8_t output_min,
177
+ uint8_t output_max,
178
+ uint32_t flags,
179
+ qnnp_operator_t* average_pooling);
180
+
181
+ enum qnnp_status qnnp_setup_average_pooling2d_nhwc_q8(
182
+ qnnp_operator_t average_pooling,
183
+ size_t batch_size,
184
+ size_t input_height,
185
+ size_t input_width,
186
+ const uint8_t* input,
187
+ size_t input_stride,
188
+ uint8_t* output,
189
+ size_t output_stride,
190
+ pthreadpool_t threadpool);
191
+
192
+ enum qnnp_status qnnp_create_max_pooling2d_nhwc_u8(
193
+ uint32_t input_padding_top,
194
+ uint32_t input_padding_right,
195
+ uint32_t input_padding_bottom,
196
+ uint32_t input_padding_left,
197
+ uint32_t pooling_height,
198
+ uint32_t pooling_width,
199
+ uint32_t stride_height,
200
+ uint32_t stride_width,
201
+ uint32_t dilation_height,
202
+ uint32_t dilation_width,
203
+ size_t channels,
204
+ uint8_t output_min,
205
+ uint8_t output_max,
206
+ uint32_t flags,
207
+ qnnp_operator_t* max_pooling);
208
+
209
+ enum qnnp_status qnnp_setup_max_pooling2d_nhwc_u8(
210
+ qnnp_operator_t max_pooling,
211
+ size_t batch_size,
212
+ size_t input_height,
213
+ size_t input_width,
214
+ const uint8_t* input,
215
+ size_t input_stride,
216
+ uint8_t* output,
217
+ size_t output_stride,
218
+ pthreadpool_t threadpool);
219
+
220
+ enum qnnp_status qnnp_create_channel_shuffle_nc_x8(
221
+ size_t groups,
222
+ size_t group_channels,
223
+ uint32_t flags,
224
+ qnnp_operator_t* channel_shuffle);
225
+
226
+ enum qnnp_status qnnp_setup_channel_shuffle_nc_x8(
227
+ qnnp_operator_t channel_shuffle,
228
+ size_t batch_size,
229
+ const uint8_t* input,
230
+ size_t input_stride,
231
+ uint8_t* output,
232
+ size_t output_stride);
233
+
234
+ enum qnnp_status qnnp_create_add_nc_q8(
235
+ size_t channels,
236
+ uint8_t a_zero_point,
237
+ float a_scale,
238
+ uint8_t b_zero_point,
239
+ float b_scale,
240
+ uint8_t sum_zero_point,
241
+ float sum_scale,
242
+ uint8_t sum_min,
243
+ uint8_t sum_max,
244
+ uint32_t flags,
245
+ qnnp_operator_t* add);
246
+
247
+ enum qnnp_status qnnp_setup_add_nc_q8(
248
+ qnnp_operator_t add,
249
+ size_t batch_size,
250
+ const uint8_t* a,
251
+ size_t a_stride,
252
+ const uint8_t* b,
253
+ size_t b_stride,
254
+ uint8_t* sum,
255
+ size_t sum_stride);
256
+
257
+ enum qnnp_status qnnp_create_clamp_nc_u8(
258
+ size_t channels,
259
+ uint8_t output_min,
260
+ uint8_t output_max,
261
+ uint32_t flags,
262
+ qnnp_operator_t* clamp);
263
+
264
+ enum qnnp_status qnnp_setup_clamp_nc_u8(
265
+ qnnp_operator_t clamp,
266
+ size_t batch_size,
267
+ const uint8_t* input,
268
+ size_t input_stride,
269
+ uint8_t* output,
270
+ size_t output_stride);
271
+
272
+ enum qnnp_status qnnp_create_sigmoid_nc_q8(
273
+ size_t channels,
274
+ uint8_t input_zero_point,
275
+ float input_scale,
276
+ uint8_t output_zero_point,
277
+ float output_scale,
278
+ uint8_t output_min,
279
+ uint8_t output_max,
280
+ uint32_t flags,
281
+ qnnp_operator_t* sigmoid);
282
+
283
+ enum qnnp_status qnnp_setup_sigmoid_nc_q8(
284
+ qnnp_operator_t sigmoid,
285
+ size_t batch_size,
286
+ const uint8_t* input,
287
+ size_t input_stride,
288
+ uint8_t* output,
289
+ size_t output_stride);
290
+
291
+ enum qnnp_status qnnp_create_leaky_relu_nc_q8(
292
+ size_t channels,
293
+ float negative_slope,
294
+ uint8_t input_zero_point,
295
+ float input_scale,
296
+ uint8_t output_zero_point,
297
+ float output_scale,
298
+ uint8_t output_min,
299
+ uint8_t output_max,
300
+ uint32_t flags,
301
+ qnnp_operator_t* leaky_relu);
302
+
303
+ enum qnnp_status qnnp_setup_leaky_relu_nc_q8(
304
+ qnnp_operator_t leaky_relu,
305
+ size_t batch_size,
306
+ const uint8_t* input,
307
+ size_t input_stride,
308
+ uint8_t* output,
309
+ size_t output_stride);
310
+
311
+ enum qnnp_status qnnp_create_softargmax_nc_q8(
312
+ size_t channels,
313
+ float input_scale,
314
+ uint8_t output_zero_point,
315
+ float output_scale,
316
+ uint32_t flags,
317
+ qnnp_operator_t* softargmax);
318
+
319
+ enum qnnp_status qnnp_setup_softargmax_nc_q8(
320
+ qnnp_operator_t softargmax,
321
+ size_t batch_size,
322
+ const uint8_t* input,
323
+ size_t input_stride,
324
+ uint8_t* output,
325
+ size_t output_stride);
326
+
327
+ enum qnnp_status qnnp_run_operator(
328
+ qnnp_operator_t op,
329
+ pthreadpool_t threadpool);
330
+
331
+ enum qnnp_status qnnp_delete_operator(
332
+ qnnp_operator_t op);
333
+
334
+ #ifdef __cplusplus
335
+ } /* extern "C" */
336
+ #endif
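
An illustrative sketch (not from the diff): the create / setup / run / delete lifecycle that the operators declared above share, shown for the clamp operator. Passing a NULL threadpool to run single-threaded, and treating the strides as element counts between batch entries, are assumptions made for this example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <qnnpack.h>

int main(void) {
  if (qnnp_initialize() != qnnp_status_success) {
    fprintf(stderr, "QNNPACK could not be initialized on this hardware\n");
    return 1;
  }

  enum { kChannels = 8, kBatch = 2 };
  uint8_t input[kBatch * kChannels];
  uint8_t output[kBatch * kChannels];
  for (size_t i = 0; i < sizeof(input); i++) {
    input[i] = (uint8_t) (i * 16);
  }

  /* Clamp every element into [32, 192]. */
  qnnp_operator_t clamp_op = NULL;
  if (qnnp_create_clamp_nc_u8(kChannels, 32, 192, 0 /* flags */, &clamp_op) !=
      qnnp_status_success) {
    return 1;
  }
  qnnp_setup_clamp_nc_u8(clamp_op, kBatch, input, kChannels, output, kChannels);
  qnnp_run_operator(clamp_op, NULL /* threadpool: assumed single-threaded */);
  qnnp_delete_operator(clamp_op);

  qnnp_deinitialize();
  return 0;
}
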
llmeval-env/lib/python3.10/site-packages/torch/include/qnnpack_func.h ADDED
@@ -0,0 +1,166 @@
1
+ #pragma once
2
+
3
+ #include <cstdlib>
4
+ #include <qnnpack/operator.h>
5
+
6
+ namespace qnnpack {
7
+ class PrePackConvWeights final {
8
+ public:
9
+ PrePackConvWeights(
10
+ const pytorch_qnnp_operator_t convolution,
11
+ const uint8_t* kernel_zero_points,
12
+ const uint8_t* kernel,
13
+ const int32_t* bias);
14
+
15
+ void* getPackedWeights() const
16
+ {
17
+ return packed_weights_;
18
+ }
19
+
20
+ int64_t getOutputChannels() const
21
+ {
22
+ return output_channels_;
23
+ }
24
+
25
+ ~PrePackConvWeights()
26
+ {
27
+ if (packed_weights_ != nullptr) {
28
+ free(packed_weights_);
29
+ }
30
+ }
31
+
32
+ PrePackConvWeights() = delete;
33
+ PrePackConvWeights(const PrePackConvWeights&) = delete;
34
+ PrePackConvWeights& operator=(const PrePackConvWeights&) = delete;
35
+
36
+ private:
37
+ void* packed_weights_ = nullptr;
38
+ int64_t output_channels_;
39
+ };
40
+
41
+ class PackBMatrix final {
42
+ public:
43
+ PackBMatrix(
44
+ size_t input_channels,
45
+ size_t output_channels,
46
+ const uint8_t* kernel_zero_points,
47
+ const float* requantization_scale,
48
+ const uint8_t* kernel,
49
+ const int32_t* bias);
50
+
51
+ // This constructor is to be used for dynamic mode
52
+ // quantization. In dynamic mode, we dont yet support
53
+ // per channel quantization, and paying the cost of
54
+ // memory allocation for per channel zero point and
55
+ // requant scale will hurt performance.
56
+ PackBMatrix(
57
+ size_t input_channels,
58
+ size_t output_channels,
59
+ const uint8_t kernel_zero_point,
60
+ const float requantization_scale,
61
+ const uint8_t* kernel,
62
+ const int32_t* bias);
63
+
64
+ void* getPackedWeights() const
65
+ {
66
+ return packed_weights_;
67
+ }
68
+
69
+ void unpackWeights(
70
+ const uint8_t* kernel_zero_points,
71
+ int8_t* kernel
72
+ ) const;
73
+
74
+ size_t getInputChannels() const
75
+ {
76
+ return input_channels_;
77
+ }
78
+
79
+ size_t getOutputChannels() const
80
+ {
81
+ return output_channels_;
82
+ }
83
+
84
+ ~PackBMatrix()
85
+ {
86
+ if (packed_weights_ != nullptr) {
87
+ free(packed_weights_);
88
+ }
89
+ }
90
+
91
+ PackBMatrix() = delete;
92
+ PackBMatrix(const PackBMatrix&) = delete;
93
+ PackBMatrix& operator=(const PackBMatrix&) = delete;
94
+
95
+ private:
96
+ void* packed_weights_ = nullptr;
97
+ size_t input_channels_;
98
+ size_t output_channels_;
99
+ };
100
+
101
+ enum pytorch_qnnp_status qnnpackLinear(
102
+ const size_t batch_size,
103
+ const size_t input_channels,
104
+ const size_t output_channels,
105
+ const uint8_t input_zero_point,
106
+ const uint8_t* kernel_zero_points,
107
+ const float* requantization_scales,
108
+ const uint8_t output_zero_point,
109
+ const uint8_t output_min,
110
+ const uint8_t output_max,
111
+ const uint8_t* input,
112
+ const size_t input_stride,
113
+ void* packed_weights,
114
+ uint8_t* output,
115
+ const size_t output_stride,
116
+ pthreadpool_t threadpool);
117
+
118
+ enum pytorch_qnnp_status qnnpackConv(
119
+ const pytorch_qnnp_operator_t convolution,
120
+ void* packed_weights,
121
+ const size_t batch_size,
122
+ const size_t input_depth,
123
+ const size_t input_height,
124
+ const size_t input_width,
125
+ const uint8_t input_zero_point,
126
+ const uint8_t* input,
127
+ const uint8_t* kernel_zero_points,
128
+ const float* requantization_scales,
129
+ const uint8_t output_zero_point,
130
+ const uint8_t output_min,
131
+ const uint8_t output_max,
132
+ uint8_t* output,
133
+ pthreadpool_t threadpool);
134
+
135
+ enum pytorch_qnnp_status qnnpackDeConv(
136
+ const pytorch_qnnp_operator_t deconvolution,
137
+ void* packed_weights,
138
+ const size_t batch_size,
139
+ const size_t input_height,
140
+ const size_t input_width,
141
+ const uint8_t input_zero_point,
142
+ const uint8_t* input,
143
+ const uint8_t* kernel_zero_points,
144
+ const float* requantization_scales,
145
+ const uint8_t output_zero_point,
146
+ const uint8_t output_min,
147
+ const uint8_t output_max,
148
+ uint8_t* output,
149
+ pthreadpool_t threadpool);
150
+
151
+ enum pytorch_qnnp_status qnnpackLinearDynamic(
152
+ const size_t batch_size,
153
+ const size_t input_channels,
154
+ const size_t output_channels,
155
+ const uint8_t input_zero_point,
156
+ const uint8_t* kernel_zero_points,
157
+ const float* dequantization_scales,
158
+ const uint8_t* input,
159
+ const size_t input_stride,
160
+ void* packed_weights,
161
+ const float* bias,
162
+ float* output,
163
+ const size_t output_stride,
164
+ pthreadpool_t threadpool);
165
+
166
+ } // namespace qnnpack
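
An illustrative sketch (not from the diff), assuming the internal QNNPACK headers and pthreadpool are available: prepacking per-tensor quantized weights with PackBMatrix and passing them to qnnpackLinear. All scale and zero-point values below are placeholders; real values come from the quantizer.

#include <cstdint>
#include <vector>
#include <qnnpack_func.h>

void linear_example(pthreadpool_t threadpool) {
  const size_t batch_size = 1;
  const size_t input_channels = 4;
  const size_t output_channels = 2;

  std::vector<uint8_t> kernel(output_channels * input_channels, 127);
  std::vector<int32_t> bias(output_channels, 0);

  // Per-tensor constructor (single zero point / scale), i.e. the
  // dynamic-quantization path mentioned in the comment above.
  qnnpack::PackBMatrix packed_b(
      input_channels,
      output_channels,
      /*kernel_zero_point=*/127,
      /*requantization_scale=*/0.5f,
      kernel.data(),
      bias.data());

  std::vector<uint8_t> input(batch_size * input_channels, 128);
  std::vector<uint8_t> output(batch_size * output_channels, 0);
  const uint8_t kernel_zero_points[2] = {127, 127};
  const float requantization_scales[2] = {0.5f, 0.5f};

  qnnpack::qnnpackLinear(
      batch_size, input_channels, output_channels,
      /*input_zero_point=*/128, kernel_zero_points, requantization_scales,
      /*output_zero_point=*/128, /*output_min=*/0, /*output_max=*/255,
      input.data(), /*input_stride=*/input_channels,
      packed_b.getPackedWeights(),
      output.data(), /*output_stride=*/output_channels,
      threadpool);
}
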
llmeval-env/lib/python3.10/site-packages/torch/include/sleef.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h ADDED
@@ -0,0 +1,143 @@
1
+ #pragma once
2
+ #ifdef USE_CUDA
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/cuda/CUDACachingAllocator.h>
5
+ #include <c10/cuda/CUDAException.h>
6
+ #include <c10/util/Logging.h>
7
+ #include <cuda_runtime_api.h>
8
+ #include <torch/csrc/Export.h>
9
+ #include <cstddef>
10
+ namespace torch {
11
+
12
+ TORCH_CUDA_CU_API bool CudaIPCCollect();
13
+
14
+ struct CudaIPCReceivedData final {
15
+ CudaIPCReceivedData() = default;
16
+ explicit CudaIPCReceivedData(std::shared_ptr<void> shared_ptr)
17
+ : shared_ptr_(std::move(shared_ptr)) {}
18
+ std::shared_ptr<void> shared_ptr_;
19
+ };
20
+
21
+ struct CudaIPCSentData final {
22
+ std::string handle_;
23
+ uint64_t offset_;
24
+ uint64_t* counter_ptr_; // Reference counter shared memory block
25
+ at::DataPtr original_ptr_; // Original mem allocation
26
+ cudaEvent_t event_; // Sync cuEventDestroy
27
+ bool event_sync_required_;
28
+ at::Device device_;
29
+
30
+ CudaIPCSentData(
31
+ std::string handle,
32
+ uint64_t offset,
33
+ uint64_t* counter_ptr,
34
+ at::Device device);
35
+ ~CudaIPCSentData();
36
+
37
+ uint64_t counter_value();
38
+ std::string handle() {
39
+ return handle_;
40
+ }
41
+ uint64_t offset() {
42
+ return offset_;
43
+ }
44
+ void set_original_ptr(at::DataPtr data_ptr) {
45
+ original_ptr_ = std::move(data_ptr);
46
+ }
47
+ };
48
+
49
+ TORCH_CUDA_CU_API at::DataPtr GetNewRefCountedSentData(
50
+ void* data,
51
+ at::Device device);
52
+
53
+ namespace {
54
+
55
+ inline constexpr int64_t CUDA_IPC_REF_COUNTER_FILE_SIZE = 10000;
56
+ inline constexpr int64_t CUDA_IPC_WARN_AFTER_X_BLOCKS_IN_LIMBO = 1000;
57
+ // It was determined empirically that CUDA (v10.1 and below) has a limit on
58
+ // the number of recorded blocking interprocess events; it is around ~22,000.
59
+ // To give us leeway, we picked 1000, as that gives us enough events to share
60
+ // tensors effectively.
61
+ inline constexpr int64_t CUDA_IPC_MAXIMUM_EVENTS_TO_USE = 1000;
62
+
63
+ // All to-be-deleted data blocks with a non-zero reference counter go here
64
+ struct CudaIPCSentDataLimbo final {
65
+ ~CudaIPCSentDataLimbo();
66
+ bool collect();
67
+ void add(std::unique_ptr<CudaIPCSentData> shared_block);
68
+ uint64_t size();
69
+
70
+ private:
71
+ // TODO: Can be changed to FIFO in order to avoid full traverse on every
72
+ // collect()
73
+ std::vector<std::unique_ptr<CudaIPCSentData>> shared_blocks_;
74
+ std::mutex limbo_mutex_;
75
+ };
76
+
77
+ struct CudaIPCRefCountersFile final {
78
+ CudaIPCRefCountersFile(
79
+ std::string handle,
80
+ uint64_t size,
81
+ at::DataPtr data_ptr)
82
+ : size_(size),
83
+
84
+ handle_(std::move(handle)),
85
+ refcounted_shared_mem_(std::move(data_ptr)) {}
86
+
87
+ uint64_t* counter_ptr() {
88
+ return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
89
+ }
90
+
91
+ void set_counter(uint64_t value) {
92
+ *counter_ptr() = value;
93
+ }
94
+
95
+ bool have_offsets() {
96
+ return next_offset_ < size_;
97
+ }
98
+
99
+ bool offsets_in_use() {
100
+ return used_slots_;
101
+ }
102
+
103
+ uint64_t get_offset() {
104
+ return next_offset_;
105
+ }
106
+
107
+ void rotate_offset() {
108
+ next_offset_++;
109
+ used_slots_++;
110
+ }
111
+
112
+ void return_offset(uint64_t offset /* unused */) {
113
+ used_slots_--;
114
+ }
115
+
116
+ std::string handle() {
117
+ return handle_;
118
+ }
119
+
120
+ private:
121
+ uint64_t next_offset_{0};
122
+ uint64_t size_;
123
+ uint64_t used_slots_{0};
124
+ std::string handle_;
125
+ at::DataPtr refcounted_shared_mem_;
126
+ };
127
+
128
+ } // namespace
129
+ } // namespace torch
130
+
131
+ namespace c10 {
132
+ namespace {
133
+ class CudaIPCCollectCallback : public FreeMemoryCallback {
134
+ public:
135
+ bool Execute() override {
136
+ return torch::CudaIPCCollect();
137
+ }
138
+ };
139
+ } // namespace
140
+
141
+ } // namespace c10
142
+
143
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Dtype.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/python_headers.h>
6
+
7
+ constexpr int DTYPE_NAME_LEN = 64;
8
+
9
+ struct TORCH_API THPDtype {
10
+ PyObject_HEAD at::ScalarType scalar_type;
11
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
12
+ char name[DTYPE_NAME_LEN + 1];
13
+ };
14
+
15
+ TORCH_API extern PyTypeObject THPDtypeType;
16
+
17
+ inline bool THPDtype_Check(PyObject* obj) {
18
+ return Py_TYPE(obj) == &THPDtypeType;
19
+ }
20
+
21
+ inline bool THPPythonScalarType_Check(PyObject* obj) {
22
+ return obj == (PyObject*)(&PyFloat_Type) ||
23
+ obj == (PyObject*)(&PyBool_Type) || obj == (PyObject*)(&PyLong_Type);
24
+ }
25
+
26
+ TORCH_API PyObject* THPDtype_New(
27
+ at::ScalarType scalar_type,
28
+ const std::string& name);
29
+
30
+ void THPDtype_init(PyObject* module);
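
An illustrative sketch (not part of the header): how binding code might use THPDtype_Check / THPPythonScalarType_Check to accept either a torch.dtype or a Python scalar type. The float/bool/int -> ScalarType mapping chosen here is an assumption for illustration.

#include <stdexcept>
#include <torch/csrc/Dtype.h>

static at::ScalarType scalar_type_from_arg(PyObject* arg) {
  if (THPDtype_Check(arg)) {
    // arg is a torch.dtype; the ScalarType is stored directly on the object.
    return reinterpret_cast<THPDtype*>(arg)->scalar_type;
  }
  if (THPPythonScalarType_Check(arg)) {
    // arg is one of the Python types float, bool, int.
    if (arg == (PyObject*)&PyFloat_Type) {
      return at::kFloat;
    }
    if (arg == (PyObject*)&PyBool_Type) {
      return at::kBool;
    }
    return at::kLong;
  }
  throw std::runtime_error("expected a torch.dtype or a Python scalar type");
}
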
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/Layout.h>
6
+
7
+ #include <string>
8
+
9
+ const int LAYOUT_NAME_LEN = 64;
10
+
11
+ struct THPLayout {
12
+ PyObject_HEAD at::Layout layout;
13
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
14
+ char name[LAYOUT_NAME_LEN + 1];
15
+ };
16
+
17
+ extern PyTypeObject THPLayoutType;
18
+
19
+ inline bool THPLayout_Check(PyObject* obj) {
20
+ return Py_TYPE(obj) == &THPLayoutType;
21
+ }
22
+
23
+ PyObject* THPLayout_New(at::Layout layout, const std::string& name);
24
+
25
+ void THPLayout_init(PyObject* module);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/PyInterpreter.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <torch/csrc/Export.h>
5
+
6
+ TORCH_PYTHON_API c10::impl::PyInterpreter* getPyInterpreter();
7
+ TORCH_PYTHON_API bool isMainPyInterpreter();
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/QScheme.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <c10/core/QScheme.h>
6
+
7
+ #include <string>
8
+
9
+ constexpr int QSCHEME_NAME_LEN = 64;
10
+
11
+ struct THPQScheme {
12
+ PyObject_HEAD at::QScheme qscheme;
13
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
14
+ char name[QSCHEME_NAME_LEN + 1];
15
+ };
16
+
17
+ extern PyTypeObject THPQSchemeType;
18
+
19
+ inline bool THPQScheme_Check(PyObject* obj) {
20
+ return Py_TYPE(obj) == &THPQSchemeType;
21
+ }
22
+
23
+ PyObject* THPQScheme_New(at::QScheme qscheme, const std::string& name);
24
+
25
+ void THPQScheme_init(PyObject* module);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/StorageSharing.h ADDED
@@ -0,0 +1,8 @@
1
+ #ifndef THP_STORAGE_SHARING_INC
2
+ #define THP_STORAGE_SHARING_INC
3
+
4
+ #include <Python.h>
5
+
6
+ PyMethodDef* THPStorage_getSharingMethods();
7
+
8
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/Stream.h ADDED
@@ -0,0 +1,23 @@
1
+ #ifndef THP_STREAM_INC
2
+ #define THP_STREAM_INC
3
+
4
+ #include <c10/core/Stream.h>
5
+ #include <c10/macros/Export.h>
6
+ #include <torch/csrc/python_headers.h>
7
+
8
+ struct THPStream {
9
+ PyObject_HEAD int64_t stream_id;
10
+ int64_t device_type;
11
+ int64_t device_index;
12
+ };
13
+ extern TORCH_API PyTypeObject* THPStreamClass;
14
+
15
+ void THPStream_init(PyObject* module);
16
+
17
+ inline bool THPStream_Check(PyObject* obj) {
18
+ return THPStreamClass && PyObject_IsInstance(obj, (PyObject*)THPStreamClass);
19
+ }
20
+
21
+ PyObject* THPStream_Wrap(const c10::Stream& stream);
22
+
23
+ #endif // THP_STREAM_INC
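
A small illustrative sketch (not part of the header): using THPStream_Check together with the raw ids stored on THPStream, for example to test whether a Python torch.Stream argument refers to a CUDA device.

#include <c10/core/DeviceType.h>
#include <torch/csrc/Stream.h>

static bool is_cuda_stream_arg(PyObject* arg) {
  if (!THPStream_Check(arg)) {
    return false;
  }
  auto* self = reinterpret_cast<THPStream*>(arg);
  // device_type is stored as an int64_t; convert it back to c10::DeviceType.
  return static_cast<c10::DeviceType>(self->device_type) ==
      c10::DeviceType::CUDA;
}
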
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+
3
+ #define TH_CONCAT_STRING_2(x, y) TH_CONCAT_STRING_2_EXPAND(x, y)
4
+ #define TH_CONCAT_STRING_2_EXPAND(x, y) #x #y
5
+
6
+ #define TH_CONCAT_STRING_3(x, y, z) TH_CONCAT_STRING_3_EXPAND(x, y, z)
7
+ #define TH_CONCAT_STRING_3_EXPAND(x, y, z) #x #y #z
8
+
9
+ #define TH_CONCAT_STRING_4(x, y, z, w) TH_CONCAT_STRING_4_EXPAND(x, y, z, w)
10
+ #define TH_CONCAT_STRING_4_EXPAND(x, y, z, w) #x #y #z #w
11
+
12
+ #define TH_CONCAT_2(x, y) TH_CONCAT_2_EXPAND(x, y)
13
+ #define TH_CONCAT_2_EXPAND(x, y) x##y
14
+
15
+ #define TH_CONCAT_3(x, y, z) TH_CONCAT_3_EXPAND(x, y, z)
16
+ #define TH_CONCAT_3_EXPAND(x, y, z) x##y##z
17
+
18
+ #define TH_CONCAT_4_EXPAND(x, y, z, w) x##y##z##w
19
+ #define TH_CONCAT_4(x, y, z, w) TH_CONCAT_4_EXPAND(x, y, z, w)
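
An illustrative expansion of the macros above (not part of the header): the *_EXPAND indirection forces the arguments to be macro-expanded before they are pasted or stringified. PREFIX and NAME below are hypothetical macros chosen for the example.

#include <stdio.h>
#include <torch/csrc/THConcat.h>

#define PREFIX THP
#define NAME Storage

int main(void) {
  /* PREFIX and NAME expand first, so this yields the string "THPStorage". */
  printf("%s\n", TH_CONCAT_STRING_2(PREFIX, NAME));
  /* TH_CONCAT_2(PREFIX, NAME) would likewise paste the single token THPStorage. */
  return 0;
}
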
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h ADDED
@@ -0,0 +1,30 @@
1
+ #ifndef THP_H
2
+ #define THP_H
3
+
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/python_headers.h>
6
+
7
+ // Back-compatibility macros, thanks to http://cx-oracle.sourceforge.net/
8
+ // define PyInt_* macros for Python 3.x. NB: We must include Python.h first,
9
+ // otherwise we'll incorrectly conclude PyInt_Check isn't defined!
10
+ #ifndef PyInt_Check
11
+ #define PyInt_Check PyLong_Check
12
+ #define PyInt_FromLong PyLong_FromLong
13
+ #define PyInt_AsLong PyLong_AsLong
14
+ #define PyInt_Type PyLong_Type
15
+ #endif
16
+
17
+ #include <torch/csrc/Exceptions.h>
18
+ #include <torch/csrc/Generator.h>
19
+ #include <torch/csrc/Module.h>
20
+ #include <torch/csrc/Size.h>
21
+ #include <torch/csrc/Storage.h>
22
+ #include <torch/csrc/Types.h>
23
+ #include <torch/csrc/utils.h> // This requires defined Storage and Tensor types
24
+ #include <torch/csrc/utils/byte_order.h>
25
+
26
+ #include <torch/csrc/serialization.h>
27
+
28
+ #include <torch/csrc/autograd/python_autograd.h>
29
+
30
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/TypeInfo.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <ATen/ATen.h>
6
+
7
+ struct THPDTypeInfo {
8
+ PyObject_HEAD at::ScalarType type;
9
+ };
10
+
11
+ struct THPFInfo : THPDTypeInfo {};
12
+
13
+ struct THPIInfo : THPDTypeInfo {};
14
+
15
+ extern PyTypeObject THPFInfoType;
16
+ extern PyTypeObject THPIInfoType;
17
+
18
+ inline bool THPFInfo_Check(PyObject* obj) {
19
+ return Py_TYPE(obj) == &THPFInfoType;
20
+ }
21
+
22
+ inline bool THPIInfo_Check(PyObject* obj) {
23
+ return Py_TYPE(obj) == &THPIInfoType;
24
+ }
25
+
26
+ void THPDTypeInfo_init(PyObject* module);
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h ADDED
@@ -0,0 +1,10 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/InferenceMode.h>
4
+ #include <torch/csrc/Export.h>
5
+
6
+ namespace torch::autograd {
7
+
8
+ using InferenceMode = c10::InferenceMode;
9
+
10
+ }
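
An illustrative sketch (not part of the header): torch::autograd::InferenceMode is simply an alias for c10::InferenceMode, an RAII guard, so typical C++ usage looks like the following.

#include <torch/torch.h>

torch::Tensor forward_only(const torch::Tensor& input, const torch::Tensor& weight) {
  // Disable autograd tracking (and the related view/version bookkeeping)
  // for the duration of this scope.
  torch::autograd::InferenceMode guard;
  return torch::matmul(input, weight);
}
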
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h ADDED
@@ -0,0 +1,445 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+
5
+ #include <ATen/core/boxing/KernelFunction.h>
6
+ #include <ATen/core/dispatch/Dispatcher.h>
7
+
8
+ #include <torch/csrc/autograd/edge.h>
9
+ #include <torch/csrc/autograd/function.h>
10
+ #include <torch/csrc/autograd/functions/basic_ops.h>
11
+ #include <torch/csrc/autograd/functions/tensor.h>
12
+ #include <torch/csrc/autograd/grad_mode.h>
13
+ #include <torch/csrc/autograd/saved_variable.h>
14
+ #include <torch/csrc/autograd/variable.h>
15
+
16
+ #include <torch/csrc/autograd/functions/utils.h>
17
+ #include <torch/csrc/autograd/jit_decomp_interface.h>
18
+ #include <torch/csrc/utils/variadic.h>
19
+
20
+ #include <cstddef>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <utility>
24
+ #include <vector>
25
+
26
+ #ifdef _MSC_VER
27
+ #ifdef Type
28
+ #undef Type
29
+ #endif
30
+ #endif
31
+
32
+ namespace torch {
33
+ namespace autograd {
34
+ enum class can_mutate_inplace_result {
35
+ success,
36
+ non_default_backward_view,
37
+ view_of_leaf,
38
+ is_leaf,
39
+ };
40
+
41
+ // The requires_grad argument is used to know if the inplace operation needs
42
+ // gradient to be setup for it.
43
+ // In particular, we can have tensor.requires_grad() != requires_grad when
44
+ // writing a Tensor that requires gradients inplace into a Tensor that does not
45
+ // require gradients: a = torch.rand(2) b = torch.rand(2, requires_grad=True)
46
+ // a.copy_(b)
47
+ inline can_mutate_inplace_result can_mutate_inplace(
48
+ const at::Tensor& tensor,
49
+ bool requires_grad) {
50
+ if (!requires_grad || !GradMode::is_enabled()) {
51
+ return can_mutate_inplace_result::success;
52
+ }
53
+ auto diff_view_meta = impl::get_view_autograd_meta(tensor);
54
+ if (diff_view_meta && diff_view_meta->has_bw_view()) {
55
+ if (diff_view_meta->get_creation_meta() != CreationMeta::DEFAULT) {
56
+ return can_mutate_inplace_result::non_default_backward_view;
57
+ }
58
+ if (tensor.requires_grad() && tensor._base().is_leaf()) {
59
+ return can_mutate_inplace_result::view_of_leaf;
60
+ }
61
+ }
62
+ if (tensor.requires_grad() && tensor.is_leaf()) {
63
+ return can_mutate_inplace_result::is_leaf;
64
+ }
65
+ return can_mutate_inplace_result::success;
66
+ }
67
+
68
+ inline void check_inplace(const at::Tensor& tensor, bool requires_grad) {
69
+ switch (can_mutate_inplace(tensor, requires_grad)) {
70
+ case can_mutate_inplace_result::success:
71
+ return;
72
+ case can_mutate_inplace_result::non_default_backward_view: {
73
+ return handle_view_on_rebase(impl::get_view_autograd_meta(tensor));
74
+ }
75
+ case can_mutate_inplace_result::view_of_leaf:
76
+ TORCH_CHECK(
77
+ false,
78
+ "a view of a leaf Variable that requires grad is being used in an in-place operation.");
79
+ break;
80
+
81
+ case can_mutate_inplace_result::is_leaf:
82
+ TORCH_CHECK(
83
+ false,
84
+ "a leaf Variable that requires grad is being used in an in-place operation.");
85
+ break;
86
+ }
87
+ TORCH_INTERNAL_ASSERT(false);
88
+ }
89
+
90
+ inline void check_inplace(at::ITensorListRef tensors, bool requires_grad) {
91
+ for (const auto& tensor : tensors) {
92
+ check_inplace(tensor, requires_grad);
93
+ }
94
+ }
95
+
96
+ inline void throw_error_out_requires_grad(const char* name) {
97
+ AT_ERROR(
98
+ name,
99
+ "(): functions with out=... arguments don't support automatic differentiation, "
100
+ "but one of the arguments requires grad.");
101
+ }
102
+
103
+ inline void throw_error_for_complex_autograd(
104
+ const at::Tensor& tensor,
105
+ const char* name) {
106
+ if (tensor.requires_grad()) {
107
+ TORCH_CHECK(
108
+ !tensor.is_complex(),
109
+ name,
110
+ " does not support automatic differentiation for outputs with complex dtype.");
111
+ }
112
+ }
113
+
114
+ inline void throw_error_if_base_and_tensor_are_same(
115
+ const at::Tensor& base,
116
+ const at::Tensor& tensor) {
117
+ TORCH_CHECK(
118
+ base.unsafeGetTensorImpl() != tensor.unsafeGetTensorImpl(),
119
+ "View operation returned a tensor that is the same as the input base tensor. This "
120
+ "is no longer allowed; you must explicitly create a new tensor (e.g., using .detach()). "
121
+ "As a user, you could have made a mistake implementing __torch_dispatch__ or a Python "
122
+ "operator decomposition or meta registration; if that's not the case, please "
123
+ "report a bug to PyTorch or the backend you are using.");
124
+ }
125
+
126
+ inline void throw_error_for_complex_autograd(
127
+ at::ITensorListRef tensorlist,
128
+ const char* name) {
129
+ for (const auto& tensor : tensorlist) {
130
+ throw_error_for_complex_autograd(tensor, name);
131
+ }
132
+ }
133
+
134
+ // TODO: Blegh, bare references
135
+
136
+ inline void rebase_history(const Variable& var, std::shared_ptr<Node> grad_fn) {
137
+ if (grad_fn && var.defined()) {
138
+ grad_fn->add_input_metadata(var);
139
+ impl::rebase_history(var, {std::move(grad_fn), 0});
140
+ }
141
+ }
142
+
143
+ inline void rebase_history(
144
+ const std::vector<Variable>& vars,
145
+ const std::shared_ptr<Node>& grad_fn) {
146
+ if (grad_fn) {
147
+ for (auto& var : vars) {
148
+ if (var.defined()) {
149
+ auto output_nr = grad_fn->add_input_metadata(var);
150
+ impl::rebase_history(var, {grad_fn, output_nr});
151
+ } else {
152
+ grad_fn->add_input_metadata(Node::undefined_input());
153
+ }
154
+ }
155
+ }
156
+ }
157
+
158
+ inline void increment_version(const at::Tensor& t) {
159
+ impl::bump_version(t);
160
+ }
161
+
162
+ struct Flatten : IterArgs<Flatten> {
163
+ Flatten(variable_list& out) : out(out) {}
164
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
165
+ variable_list& out;
166
+ void operator()(const at::Tensor& x) {
167
+ out.emplace_back(x);
168
+ }
169
+ void operator()(const c10::optional<at::Tensor>& x) {
170
+ if (x.has_value())
171
+ out.emplace_back(x.value());
172
+ }
173
+ void operator()(at::ArrayRef<at::Tensor> xs) {
174
+ out.insert(out.end(), xs.begin(), xs.end());
175
+ }
176
+ };
177
+
178
+ template <typename... Args>
179
+ inline variable_list flatten_tensor_args(Args&&... args) {
180
+ variable_list out;
181
+ out.reserve(count_tensors(std::forward<Args>(args)...));
182
+ Flatten(out).apply(std::forward<Args>(args)...);
183
+ return out; // RVO
184
+ }
185
+
186
+ // See NOTE [ Autograd View Variables ] for details.
187
+ inline at::Tensor as_view(
188
+ const at::Tensor& base,
189
+ const at::Tensor& tensor,
190
+ bool is_bw_differentiable,
191
+ bool is_fw_differentiable,
192
+ std::unique_ptr<ViewFunc> view_func = nullptr,
193
+ std::function<at::Tensor(const at::Tensor&)> rev_view_func = nullptr,
194
+ CreationMeta creation_meta = CreationMeta::DEFAULT,
195
+ bool allow_tensor_metadata_change = true) {
196
+ // Note [View of inference tensor]
197
+ // For inference tensor this code can only be hit outside InferenceMode
198
+ // since ADInplaceOrView is in the default_included_set.
199
+ // If Inplace and View were separate dispatch keys we can just put Inplace
200
+ // in the default_included_set, so that view ops on inference tensor doesn't
201
+ // have to go through as_view even outside InferenceMode.
202
+ if (base.is_inference())
203
+ return tensor;
204
+
205
+ auto diff_view_meta = torch::autograd::impl::get_view_autograd_meta(base);
206
+
207
+ // To speed up the most common case, we specially handle when both the forward
208
+ // and backward view infos are the same, and so a single shared ViewInfo can
209
+ // be used for both of them.
210
+ if ((!diff_view_meta || diff_view_meta->shared_view_info()) &&
211
+ is_bw_differentiable && is_fw_differentiable) {
212
+ throw_error_if_base_and_tensor_are_same(base, tensor);
213
+ if (diff_view_meta) {
214
+ creation_meta = propagate_creation_meta(
215
+ diff_view_meta->get_creation_meta(), creation_meta);
216
+ return make_variable_differentiable_view(
217
+ tensor,
218
+ diff_view_meta->get_backward_view().chain(
219
+ base, tensor, std::move(view_func), std::move(rev_view_func)),
220
+ c10::nullopt,
221
+ /*shared_view_info*/ true,
222
+ creation_meta,
223
+ allow_tensor_metadata_change);
224
+ } else {
225
+ return make_variable_differentiable_view(
226
+ tensor,
227
+ ViewInfo(base, std::move(view_func), std::move(rev_view_func)),
228
+ c10::nullopt,
229
+ /*shared_view_info*/ true,
230
+ creation_meta,
231
+ allow_tensor_metadata_change);
232
+ }
233
+ }
234
+
235
+ // If they cannot be shared, create the required view infos
236
+ c10::optional<ViewInfo> new_bw_info;
237
+ c10::optional<ViewInfo> new_fw_info;
238
+
239
+ if (is_bw_differentiable) {
240
+ auto bw_view_func = view_func ? view_func->clone_and_set() : nullptr;
241
+ if (diff_view_meta && diff_view_meta->has_bw_view()) {
242
+ const auto& base_bw_info = diff_view_meta->get_backward_view();
243
+ new_bw_info = base_bw_info.chain(
244
+ base, tensor, std::move(bw_view_func), rev_view_func);
245
+ } else {
246
+ new_bw_info = ViewInfo(base, std::move(bw_view_func), rev_view_func);
247
+ }
248
+ } else {
249
+ TORCH_CHECK(
250
+ creation_meta == CreationMeta::DEFAULT,
251
+ "Non-backward differentiable views must have creation_meta=CreationMeta::DEFAULT");
252
+ }
253
+
254
+ if (is_fw_differentiable) {
255
+ // Check if base is a forward differentiable view
256
+ if (diff_view_meta && diff_view_meta->has_fw_view()) {
257
+ const auto& base_fw_info = diff_view_meta->get_forward_view();
258
+ new_fw_info = base_fw_info.chain(
259
+ base, tensor, std::move(view_func), std::move(rev_view_func));
260
+ } else {
261
+ new_fw_info =
262
+ ViewInfo(base, std::move(view_func), std::move(rev_view_func));
263
+ }
264
+ }
265
+
266
+ if (is_fw_differentiable || is_bw_differentiable) {
267
+ if (diff_view_meta && diff_view_meta->has_bw_view()) {
268
+ creation_meta = propagate_creation_meta(
269
+ diff_view_meta->get_creation_meta(), creation_meta);
270
+ }
271
+ throw_error_if_base_and_tensor_are_same(base, tensor);
272
+ return make_variable_differentiable_view(
273
+ tensor,
274
+ std::move(new_bw_info),
275
+ std::move(new_fw_info),
276
+ /*shared_view_info*/ false,
277
+ creation_meta,
278
+ allow_tensor_metadata_change);
279
+ } else {
280
+ return make_variable_non_differentiable_view(
281
+ base, tensor, allow_tensor_metadata_change);
282
+ }
283
+ }
284
+
285
+ inline void check_no_requires_grad(
286
+ const at::Tensor& tensor,
287
+ const char* name,
288
+ const char* fn_name = "",
289
+ bool check_grad_mode = true) {
290
+ TORCH_CHECK(
291
+ !(tensor.defined() && tensor.requires_grad()) ||
292
+ !(check_grad_mode && GradMode::is_enabled()),
293
+ "The function '",
294
+ fn_name,
295
+ "' is not differentiable with respect to argument '",
296
+ name,
297
+ "'. This input cannot have requires_grad True.");
298
+ }
299
+
300
+ inline void check_no_requires_grad(
301
+ const c10::optional<at::Tensor>& tensor,
302
+ const char* name,
303
+ const char* fn_name = "") {
304
+ if (tensor.has_value()) {
305
+ check_no_requires_grad(*tensor, name, fn_name);
306
+ }
307
+ }
308
+
309
+ inline void check_no_requires_grad(
310
+ at::ITensorListRef tensors,
311
+ const char* name,
312
+ const char* fn_name = "") {
313
+ // GradMode check is expensive, so check it only once for TensorLists
314
+ if (!GradMode::is_enabled()) {
315
+ return;
316
+ }
317
+ for (auto& tensor : tensors) {
318
+ check_no_requires_grad(tensor, name, fn_name, /*check_grad_mode*/ false);
319
+ }
320
+ }
321
+
322
+ inline void check_no_requires_grad(
323
+ const c10::List<c10::optional<at::Tensor>>& tensors,
324
+ const char* name,
325
+ const char* fn_name = "") {
326
+ // GradMode check is expensive, so check it only once for TensorLists
327
+ if (!GradMode::is_enabled()) {
328
+ return;
329
+ }
330
+ for (c10::optional<at::Tensor> tensor : tensors) {
331
+ if (tensor.has_value()) {
332
+ check_no_requires_grad(*tensor, name, fn_name, /*check_grad_mode*/ false);
333
+ }
334
+ }
335
+ }
336
+
337
+ // Assumed that saved tensor lists are never inplace outputs
338
+ inline std::vector<SavedVariable> make_saved_variable_list(
339
+ at::ITensorListRef tensors,
340
+ const bool is_output = false) {
341
+ return fmap(tensors, [&is_output](const at::Tensor& tensor) -> SavedVariable {
342
+ return SavedVariable{tensor, is_output /* is output */};
343
+ });
344
+ }
345
+
346
+ // Assumed that saved tensor lists are never inplace outputs
347
+ inline std::vector<SavedVariable> make_saved_variable_list(
348
+ const c10::List<c10::optional<at::Tensor>>& tensors,
349
+ const bool is_output = false) {
350
+ return fmap(
351
+ tensors,
352
+ [&is_output](const c10::optional<at::Tensor>& tensor) -> SavedVariable {
353
+ if (tensor.has_value()) {
354
+ return SavedVariable{*tensor, is_output /* is output */};
355
+ } else {
356
+ return SavedVariable{at::Tensor(), is_output /* is output */};
357
+ }
358
+ });
359
+ }
360
+
361
+ inline std::vector<std::vector<int64_t>> to_args_sizes(
362
+ at::ITensorListRef tensors) {
363
+ std::vector<std::vector<int64_t>> args_sizes(tensors.size());
364
+ size_t i = 0;
365
+ for (const auto& t : tensors) {
366
+ args_sizes[i++] = t.sizes().vec();
367
+ }
368
+ return args_sizes;
369
+ }
370
+
371
+ inline std::vector<std::vector<c10::SymInt>> to_args_sizes_symint(
372
+ at::ITensorListRef tensors) {
373
+ std::vector<std::vector<c10::SymInt>> args_sizes(tensors.size());
374
+ size_t i = 0;
375
+ for (const auto& t : tensors) {
376
+ args_sizes[i++] = t.sym_sizes().vec();
377
+ }
378
+ return args_sizes;
379
+ }
380
+
381
+ inline std::vector<c10::ScalarType> to_args_scalartypes(
382
+ at::ITensorListRef tensors) {
383
+ std::vector<c10::ScalarType> args_scalartypes(tensors.size());
384
+ size_t i = 0;
385
+ for (const auto& t : tensors) {
386
+ args_scalartypes[i++] = t.scalar_type();
387
+ }
388
+ return args_scalartypes;
389
+ }
390
+
391
+ namespace impl {
392
+
393
+ namespace {
394
+
395
+ // If run_jit_decomposition were not a member function, we would be able
396
+ // to pass this as a template parameter to c10::BoxedKernel::makeFromFunction.
397
+ // However, member functions cannot be passed this way - instead we wrap our
398
+ // call in this functor so it can be passed to c10::BoxedKernel::makeFromFunctor
399
+ class WrapperFunctor final : public c10::OperatorKernel {
400
+ public:
401
+ WrapperFunctor(JitDecompInterface* impl) : impl_(impl){};
402
+
403
+ void operator()(
404
+ const c10::OperatorHandle& op,
405
+ c10::DispatchKeySet ks,
406
+ torch::jit::Stack* stack) {
407
+ impl_->run_jit_decomposition(op, stack);
408
+ }
409
+ JitDecompInterface* impl_;
410
+ };
411
+
412
+ } // namespace
413
+
414
+ template <class Return, class... Args>
415
+ Return run_jit_decomposition_with_args_for_jvp(
416
+ c10::string_view name,
417
+ const c10::OperatorHandle& opHandle,
418
+ c10::DispatchKeySet dispatchKeySet,
419
+ Args&&... args) {
420
+ // see NOTE: [Jit Decomposition Interface]
421
+ JitDecompInterface* impl = getJitDecompImpl();
422
+
423
+ TORCH_CHECK_NOT_IMPLEMENTED(
424
+ impl && impl->has_jit_decomposition(opHandle.schema()),
425
+ "Trying to use forward AD with ",
426
+ name,
427
+ " that does not support it because it has not been implemented yet.\nPlease file an issue "
428
+ "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
429
+ "so that we can prioritize its implementation.\n"
430
+ "Note that forward AD support for some operators require PyTorch to be built with "
431
+ "TorchScript and for JIT to be enabled. "
432
+ "If the environment var PYTORCH_JIT=0 is set or if the library is not built with TorchScript, "
433
+ "some operators may no longer be used with forward AD.");
434
+
435
+ return c10::KernelFunction::makeFromBoxedKernel(
436
+ c10::BoxedKernel::makeFromFunctor(
437
+ std::make_unique<WrapperFunctor>(impl)))
438
+ .call<Return, Args...>(
439
+ opHandle, dispatchKeySet, std::forward<Args>(args)...);
440
+ }
441
+
442
+ } // namespace impl
443
+
444
+ } // namespace autograd
445
+ } // namespace torch
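The helpers above are consumed by generated autograd kernels rather than by user code, but a small sketch can make their contracts concrete. The snippet below is illustrative only and not part of the header; it assumes a libtorch build where this internal header is on the include path, and the operator name `my_op` is hypothetical.

```cpp
#include <torch/csrc/autograd/VariableTypeUtils.h>
#include <torch/torch.h>
#include <iostream>

int main() {
  auto a = torch::randn({2, 3});
  auto b = torch::randn({4});

  // Guard an argument the op is not differentiable with respect to;
  // this throws only if `b` requires grad while grad mode is enabled.
  torch::autograd::check_no_requires_grad(b, "weight", "my_op");

  // Capture per-argument sizes the way generated backward nodes do.
  std::vector<at::Tensor> tensors{a, b};
  auto sizes = torch::autograd::to_args_sizes(at::TensorList(tensors));
  for (const auto& s : sizes) {
    std::cout << s.size() << " dims\n";  // prints "2 dims" then "1 dims"
  }
  return 0;
}
```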
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h ADDED
@@ -0,0 +1,32 @@
1
+ #pragma once
2
+
3
+ #include <torch/library.h>
4
+
5
+ namespace torch::autograd {
6
+
7
+ // Default DispatchKey::Autograd fallback for built-in operators.
8
+ // Can be registered for custom operators.
9
+ TORCH_API torch::CppFunction autogradNotImplementedFallback();
10
+
11
+ // Default DispatchKey::AdInplaceOrView fallback for built-in operators
12
+ // Can be registered for custom operators.
13
+ TORCH_API torch::CppFunction autogradNotImplementedInplaceOrViewFallback();
14
+
15
+ // Default DispatchKey::Autograd fallback for all other operators (i.e. custom
16
+ // operators)
17
+ TORCH_API torch::CppFunction basicAutogradNotImplementedFallback();
18
+
19
+ enum class AutogradFallbackMode {
20
+ Nothing, // Fallback is a redispatch
21
+ Warn, // Fallback raises a warning if backward is called
22
+ Error, // Fallback raises an error if backward is called
23
+ };
24
+
25
+ // Change the behavior of "basicAutogradNotImplementedFallback"
26
+ // In Python this is:
27
+ // - torch._C._set_autograd_fallback_mode(str) -> None
28
+ // - torch._C._get_autograd_fallback_mode() -> str
29
+ TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode);
30
+ TORCH_API AutogradFallbackMode getAutogradFallbackMode();
31
+
32
+ } // namespace torch::autograd
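As a usage sketch (not part of the header), these fallbacks are typically registered for an operator library so that calling its ops under autograd fails loudly instead of silently producing wrong gradients; the library name `myops` below is hypothetical.

```cpp
#include <torch/library.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>

// Route every op in the hypothetical "myops" namespace that lacks an
// explicit autograd kernel to the not-implemented fallback.
TORCH_LIBRARY_IMPL(myops, Autograd, m) {
  m.fallback(torch::autograd::autogradNotImplementedFallback());
}

// The companion fallback for the ADInplaceOrView key handles the
// view/in-place bookkeeping for those same ops.
TORCH_LIBRARY_IMPL(myops, ADInplaceOrView, m) {
  m.fallback(torch::autograd::autogradNotImplementedInplaceOrViewFallback());
}
```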
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h ADDED
@@ -0,0 +1,425 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue.h>
4
+ #include <c10/core/SymInt.h>
5
+ #include <c10/util/flat_hash_map.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/autograd/function.h>
8
+ #include <torch/csrc/autograd/variable.h>
9
+ #include <torch/csrc/autograd/variable_info.h>
10
+ #include <vector>
11
+
12
+ namespace torch::autograd {
13
+
14
+ using optional_variable_list = std::vector<c10::optional<Variable>>;
15
+ using _jvp_fn_t = std::function<variable_list(variable_list, variable_list)>;
16
+ using _view_as_self_fn_t = std::function<at::Tensor(at::Tensor)>;
17
+
18
+ TORCH_API std::vector<c10::optional<Variable>> _wrap_outputs(
19
+ const variable_list& input_vars,
20
+ const std::unordered_set<at::TensorImpl*>& non_differentiable,
21
+ const std::unordered_set<at::TensorImpl*>& dirty_inputs,
22
+ const at::ArrayRef<c10::optional<Variable>> raw_outputs,
23
+ const std::shared_ptr<Node>& cdata,
24
+ const _jvp_fn_t& jvp_user_function,
25
+ const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context,
26
+ const _view_as_self_fn_t& view_as_self_fn);
27
+
28
+ TORCH_API void check_variable_result(
29
+ const at::TensorBase& original,
30
+ const at::TensorBase& result,
31
+ const std::string& hook_name);
32
+
33
+ // Get the return type of the forward function of the custom Function class X
34
+ template <typename X, typename... Args>
35
+ using forward_t = decltype(X::forward(nullptr, std::declval<Args>()...));
36
+
37
+ /// To use custom autograd operations, implement a Function subclass with
38
+ /// static forward and backward functions:
39
+ ///
40
+ /// `forward` can take as many arguments as you want and should return either a
41
+ /// variable list or a Variable. Use of any direct Variable arguments will be
42
+ /// registered in the graph but no vectors/sets or any other data structures
43
+ /// will be traversed. You can use c10::optional<Tensor> as one of the arguments
44
+ /// and it will be registered as a variable in the graph if the argument has a
45
+ /// value. It should take a pointer to `torch::autograd::AutogradContext` as the
46
+ /// first argument. Variables can be saved in the `ctx` using
47
+ /// `ctx->save_for_backward`
48
+ /// (see `torch::autograd::AutogradContext::save_for_backward`) and other data
49
+ /// can be saved in the `ctx->saved_data` map
50
+ /// (see `torch::autograd::AutogradContext::saved_data`)
51
+ /// in the form of `<std::string, at::IValue>` pairs.
52
+ ///
53
+ /// `backward` should take a pointer to `torch::autograd::AutogradContext`
54
+ /// and a variable list containing as many Variables as there were outputs from
55
+ /// `forward` as arguments. It should return as many Variables as there were
56
+ /// inputs with each of them containing the gradient w.r.t. its corresponding
57
+ /// input. Variables saved in `forward` can be accessed with
58
+ /// `ctx->get_saved_variables` (see
59
+ /// `torch::autograd::AutogradContext::get_saved_variables`) and other saved
60
+ /// data can be accessed from `ctx->saved_data`.
61
+ ///
62
+ /// For example:
63
+ /// ```
64
+ /// class MyFunction : public Function<MyFunction> {
65
+ /// public:
66
+ /// static variable_list forward(AutogradContext *ctx, int n, Variable var) {
67
+ /// // Save data for backward in context
68
+ /// ctx->saved_data["n"] = n;
69
+ /// var.mul_(2);
70
+ /// // Mark var as modified by inplace operation
71
+ /// ctx->mark_dirty({var});
72
+ /// return {var};
73
+ /// }
74
+ ///
75
+ /// static variable_list backward(AutogradContext *ctx, variable_list
76
+ /// grad_output) {
77
+ /// // Use data saved in forward
78
+ /// auto n = ctx->saved_data["n"].toInt();
79
+ /// return {grad_output[0]*n};
80
+ /// }
81
+ /// };
82
+ /// ```
83
+ ///
84
+ /// To use `MyFunction`:
85
+ /// ```
86
+ /// Variable x;
87
+ /// auto y = MyFunction::apply(6, x);
88
+ /// // Example backward call
89
+ /// y[0].sum().backward();
90
+ /// ```
91
+ template <class T>
92
+ struct TORCH_API Function {
93
+ // We need to use a different template parameter than T here because T will
94
+ // inherit from Function, and when Function<T> is instantiated, T::forward
95
+ // is not declared yet.
96
+ // The enable_if check is to ensure that the user doesn't explicitly provide
97
+ // the parameter X.
98
+ template <typename X = T, typename... Args>
99
+ static auto apply(Args&&... args)
100
+ -> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>>;
101
+ };
102
+
103
+ /// Context to save information during `forward` that can be accessed in
104
+ /// `backward` in custom autograd operations (see `torch::autograd::Function`
105
+ /// for details).
106
+ struct TORCH_API AutogradContext {
107
+ AutogradContext() = default;
108
+ AutogradContext(const AutogradContext& other) = delete;
109
+ AutogradContext& operator=(const AutogradContext& other) = delete;
110
+
111
+ /// Can be used to save non-variable data for `backward`.
112
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
113
+ ska::flat_hash_map<std::string, at::IValue> saved_data;
114
+
115
+ /// Saves the list of variables for a future call to `backward`. This
116
+ /// should be called at most once from inside of `forward`.
117
+ void save_for_backward(variable_list to_save);
118
+ /// Marks variables in the list as modified in an in-place operation. This
119
+ /// should be called at most once from inside of `forward` and all arguments
120
+ /// should be inputs.
121
+ void mark_dirty(const variable_list& inputs);
122
+ /// Marks outputs in the list as not requiring gradients. This should be
123
+ /// called at most once from inside of `forward` and all arguments should be
124
+ /// outputs.
125
+ void mark_non_differentiable(const variable_list& outputs);
126
+ // Sets whether undefined output grad tensors should be expanded to tensors
127
+ // full of zeros before calling backward function. Default value is true.
128
+ void set_materialize_grads(bool value);
129
+
130
+ /// Get the list of variables that were saved in `forward` using
131
+ /// `save_for_backward()`. Before returning them to the user, a check is made
132
+ /// to ensure that they were not modified by any in-place operations.
133
+ variable_list get_saved_variables() const;
134
+ const std::unordered_set<at::TensorImpl*>& get_and_bump_dirty() const;
135
+ const std::unordered_set<at::TensorImpl*>& get_non_differentiable() const;
136
+
137
+ /// Expose the Node's `task_should_compute_output` method to the cpp
138
+ /// custom autograd Function as `needs_input_grad`.
139
+ bool needs_input_grad(size_t output_edge_index) const;
140
+ bool needs_input_grad(std::initializer_list<IndexRange> idxs) const;
141
+
142
+ private:
143
+ std::unordered_set<at::TensorImpl*> non_differentiable_;
144
+ std::unordered_set<at::TensorImpl*> dirty_inputs_;
145
+ std::vector<torch::autograd::SavedVariable> saved_variables_;
146
+ variable_list to_save_;
147
+ bool materialize_grads_{true};
148
+
149
+ // The CppNode in the autograd graph that owns this AutogradContext. We need a
150
+ // weak_ptr to avoid a refcycle. Since grad_fn_ owns this AutogradContext, it
151
+ // will always be alive when we want to use it.
152
+ std::weak_ptr<Node> grad_fn_;
153
+ bool has_freed_buffers_{false};
154
+
155
+ void save_variables();
156
+
157
+ template <class T>
158
+ friend struct CppNode;
159
+ };
160
+
161
+ // CppNode<T> is the Node in the autograd graph that represents the user defined
162
+ // backward function for Function<T>. Calls to CppNode::apply are forwarded to
163
+ // T::backward().
164
+ template <class T>
165
+ struct CppNode : public Node {
166
+ variable_list apply(variable_list&& inputs) override;
167
+ AutogradContext ctx_;
168
+ std::vector<bool> is_variable_input_;
169
+ std::vector<VariableInfo> input_info_;
170
+ std::vector<VariableInfo> output_info_;
171
+
172
+ void release_variables() override;
173
+
174
+ void set_ctx_grad_fn(const std::shared_ptr<Node>& node);
175
+ void save_variables_to_ctx();
176
+ };
177
+
178
+ struct ExtractVariables : IterArgs<ExtractVariables> {
179
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
180
+ std::vector<bool>& is_var_;
181
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
182
+ variable_list& list_;
183
+ ExtractVariables(std::vector<bool>& is_var, variable_list& list)
184
+ : is_var_(is_var), list_(list) {}
185
+ void operator()(const c10::optional<at::Tensor>& x) {
186
+ // NOLINTNEXTLINE(bugprone-branch-clone)
187
+ if (x.has_value() && x.value().defined()) {
188
+ is_var_.push_back(true);
189
+ list_.emplace_back(x.value());
190
+ } else {
191
+ is_var_.push_back(false);
192
+ }
193
+ }
194
+ void operator()(const at::Tensor& x) {
195
+ is_var_.push_back(true);
196
+ list_.emplace_back(x);
197
+ }
198
+ void operator()(const at::TensorList& list) {
199
+ for (const at::Tensor& x : list) {
200
+ is_var_.push_back(true);
201
+ list_.emplace_back(x);
202
+ }
203
+ }
204
+ template <typename T>
205
+ void operator()(const T& x) {
206
+ is_var_.push_back(false);
207
+ }
208
+ };
209
+
210
+ template <typename... Args>
211
+ inline void extract_vars(
212
+ std::vector<bool>& is_var,
213
+ variable_list& list,
214
+ Args&&... args) {
215
+ ExtractVariables(is_var, list).apply(std::forward<Args>(args)...);
216
+ }
217
+
218
+ template <typename T>
219
+ std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
220
+ std::vector<c10::optional<Variable>>& output_list) {
221
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
222
+ variable_list result;
223
+ std::transform(
224
+ output_list.begin(),
225
+ output_list.end(),
226
+ std::back_inserter(result),
227
+ [](const c10::optional<Variable>& var) { return *var; });
228
+ return result;
229
+ }
230
+
231
+ template <typename T>
232
+ std::enable_if_t<std::is_same_v<T, Variable>, T> to_output_type(
233
+ std::vector<c10::optional<Variable>>& output_list) {
234
+ return *output_list[0];
235
+ }
236
+
237
+ inline std::vector<c10::optional<Variable>> to_optional(Variable& output) {
238
+ return std::vector<c10::optional<Variable>>{output};
239
+ }
240
+
241
+ inline std::vector<c10::optional<Variable>> to_optional(variable_list& output) {
242
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
243
+ std::vector<c10::optional<Variable>> result;
244
+ std::transform(
245
+ output.begin(),
246
+ output.end(),
247
+ std::back_inserter(result),
248
+ [](const Variable& var) { return var; });
249
+ return result;
250
+ }
251
+
252
+ template <class T>
253
+ template <typename X, typename... Args>
254
+ auto Function<T>::apply(Args&&... args)
255
+ -> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>> {
256
+ const auto& functorch_tls = at::functorch::functorchTLSAccessor();
257
+ if (functorch_tls) {
258
+ // Function support for functorch is handled in Python.
259
+ // Here we are dealing with a (C++) Function, which is not supported.
260
+ // Let's raise an error instead of being silently incorrect.
261
+ functorch_tls->checkSupportsCppAutogradFunction();
262
+ }
263
+
264
+ std::shared_ptr<CppNode<T>> node(new CppNode<T>(), deleteNode);
265
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
266
+ variable_list input_vars;
267
+
268
+ const size_t num_inputs = sizeof...(Args);
269
+ input_vars.reserve(num_inputs);
270
+ node->is_variable_input_.reserve(num_inputs);
271
+ // TODO Add tracing here
272
+ extract_vars(node->is_variable_input_, input_vars, args...);
273
+
274
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
275
+ bool is_executable =
276
+ GradMode::is_enabled() && any_variable_requires_grad(input_vars);
277
+ auto next_edges =
278
+ (is_executable ? collect_next_edges(input_vars) : edge_list());
279
+ node->set_ctx_grad_fn(node);
280
+ node->set_next_edges(std::move(next_edges));
281
+ node->clear_input_metadata();
282
+
283
+ node->input_info_.reserve(input_vars.size());
284
+ for (auto& var : input_vars) {
285
+ node->input_info_.emplace_back(var);
286
+ }
287
+
288
+ using forward_return_t = forward_t<X, Args...>;
289
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
290
+ forward_return_t outputs;
291
+ {
292
+ AutoGradMode grad_mode(false);
293
+ outputs = T::forward(&node->ctx_, std::forward<Args>(args)...);
294
+ }
295
+
296
+ _jvp_fn_t jvp_fn = [](const variable_list& inputs,
297
+ const variable_list& gI) -> variable_list {
298
+ TORCH_CHECK(
299
+ false,
300
+ "jvp is not implemented for the c++ API of custom Function yet.",
301
+ "Please open a feature request on GitHub if you need this.");
302
+ };
303
+
304
+ auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor {
305
+ return x.view_as(x);
306
+ };
307
+
308
+ auto wrapped_outputs = _wrap_outputs(
309
+ input_vars,
310
+ node->ctx_.get_non_differentiable(),
311
+ node->ctx_.get_and_bump_dirty(),
312
+ to_optional(outputs),
313
+ is_executable ? node : nullptr,
314
+ jvp_fn,
315
+ {},
316
+ view_as_self_fn);
317
+
318
+ node->output_info_.reserve(wrapped_outputs.size());
319
+ for (auto& output : wrapped_outputs) {
320
+ if (is_executable && output.has_value()) {
321
+ node->output_info_.emplace_back(output.value());
322
+ } else if (is_executable) {
323
+ node->output_info_.emplace_back();
324
+ }
325
+ }
326
+
327
+ if (is_executable) {
328
+ node->save_variables_to_ctx();
329
+ }
330
+
331
+ // wrapped_outputs will be a variable_list so, convert it to the correct
332
+ // return type. Only Variable and variable_list are accepted as return types.
333
+ return to_output_type<forward_return_t>(wrapped_outputs);
334
+ }
335
+
336
+ // The logic here is the same as PyNode::apply, so changes to it should be done
337
+ // in both places
338
+ template <class T>
339
+ // NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
340
+ variable_list CppNode<T>::apply(variable_list&& inputs) {
341
+ at::OptionalDeviceGuard _device_guard;
342
+
343
+ auto num_inputs = inputs.size();
344
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
345
+ variable_list backward_inputs;
346
+ backward_inputs.reserve(num_inputs);
347
+ for (const auto i : c10::irange(num_inputs)) {
348
+ if (inputs[i].defined() || !ctx_.materialize_grads_) {
349
+ backward_inputs.emplace_back(std::move(inputs[i]));
350
+ } else {
351
+ backward_inputs.emplace_back(output_info_[i].zeros(_device_guard));
352
+ }
353
+ }
354
+
355
+ // Acquire the lock here to protect thread safety on the custom C++ Autograd Node.
356
+ // This is needed for the custom Autograd Node since we don't know if the
357
+ // user defined Node will write to the shared data during backward.
358
+ // see Note [Thread Safety on Autograd Node]
359
+ std::lock_guard<std::mutex> lock(mutex_);
360
+
361
+ auto outputs = T::backward(&ctx_, backward_inputs);
362
+
363
+ const auto num_forward_inputs =
364
+ static_cast<int64_t>(is_variable_input_.size());
365
+ auto num_outputs = static_cast<int64_t>(outputs.size());
366
+ // Returning too many results is ok, but only as long as they're all
367
+ // undefined. Truncate the result vector in that case.
368
+ if (num_outputs > num_forward_inputs) {
369
+ bool all_undef = true;
370
+ for (const auto i : c10::irange(num_forward_inputs, num_outputs)) {
371
+ all_undef &= (!outputs[i].defined());
372
+ }
373
+ if (all_undef) {
374
+ outputs.resize(num_forward_inputs);
375
+ num_outputs = num_forward_inputs;
376
+ }
377
+ }
378
+
379
+ if (num_outputs != num_forward_inputs) {
380
+ std::string msg("function ");
381
+ msg += name() + " returned an incorrect number of gradients (expected ";
382
+ msg += c10::to_string(num_forward_inputs) + ", got ";
383
+ msg += c10::to_string(num_outputs) + ")";
384
+ throw std::runtime_error(msg);
385
+ }
386
+
387
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
388
+ variable_list results;
389
+ results.reserve(num_outputs);
390
+ for (const auto i : c10::irange(num_outputs)) {
391
+ if (!is_variable_input_[i]) {
392
+ if (outputs[i].defined()) {
393
+ std::string msg("function ");
394
+ msg += name() +
395
+ " returned a gradient different that is defined at position ";
396
+ msg += c10::to_string(i + 1) +
397
+ ", but the corresponding forward input was not a Variable";
398
+ throw std::runtime_error(msg);
399
+ }
400
+ continue;
401
+ }
402
+ results.emplace_back(outputs[i]);
403
+ }
404
+ return results;
405
+ }
406
+
407
+ template <class T>
408
+ void CppNode<T>::release_variables() {
409
+ // lock to ensure thread safety, see [Thread Safety on Autograd Node]
410
+ std::lock_guard<std::mutex> lock(mutex_);
411
+ ctx_.saved_variables_.clear();
412
+ ctx_.has_freed_buffers_ = true;
413
+ }
414
+
415
+ template <class T>
416
+ void CppNode<T>::save_variables_to_ctx() {
417
+ ctx_.save_variables();
418
+ }
419
+
420
+ template <class T>
421
+ void CppNode<T>::set_ctx_grad_fn(const std::shared_ptr<Node>& node) {
422
+ ctx_.grad_fn_ = node;
423
+ }
424
+
425
+ } // namespace torch::autograd
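To complement the `MyFunction` example in the doc comment above (which stores non-tensor data in `saved_data`), here is a hedged sketch of a custom Function that saves a tensor with `save_for_backward` and reads it back with `get_saved_variables`. It assumes a libtorch build and is not part of the header.

```cpp
#include <torch/torch.h>
#include <iostream>

using torch::autograd::AutogradContext;
using torch::autograd::Function;
using torch::autograd::variable_list;

struct Square : public Function<Square> {
  static torch::Tensor forward(AutogradContext* ctx, torch::Tensor x) {
    ctx->save_for_backward({x});  // checked for in-place modification later
    return x * x;
  }

  static variable_list backward(AutogradContext* ctx, variable_list grad_out) {
    auto saved = ctx->get_saved_variables();
    auto x = saved[0];
    // d/dx (x^2) = 2x; return one gradient per forward input.
    return {2 * x * grad_out[0]};
  }
};

int main() {
  auto x = torch::randn({3}, torch::requires_grad());
  auto y = Square::apply(x).sum();
  y.backward();
  std::cout << x.grad() << "\n";  // equals 2 * x
  return 0;
}
```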
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h ADDED
@@ -0,0 +1,56 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <functional>
5
+ #include <memory>
6
+
7
+ #include <c10/util/hash.h>
8
+
9
+ namespace torch::autograd {
10
+
11
+ struct Node;
12
+
13
+ /// Represents a particular input of a function.
14
+ struct Edge {
15
+ Edge() noexcept : function(nullptr), input_nr(0) {}
16
+
17
+ Edge(std::shared_ptr<Node> function_, uint32_t input_nr_) noexcept
18
+ : function(std::move(function_)), input_nr(input_nr_) {}
19
+
20
+ /// Convenience method to test if an edge is valid.
21
+ bool is_valid() const noexcept {
22
+ return function != nullptr;
23
+ }
24
+
25
+ // Required for use in associative containers.
26
+ bool operator==(const Edge& other) const noexcept {
27
+ return this->function == other.function && this->input_nr == other.input_nr;
28
+ }
29
+
30
+ bool operator!=(const Edge& other) const noexcept {
31
+ return !(*this == other);
32
+ }
33
+
34
+ /// The function this `Edge` points to.
35
+ std::shared_ptr<Node> function;
36
+
37
+ /// The identifier of a particular input to the function.
38
+ uint32_t input_nr;
39
+ };
40
+ } // namespace torch::autograd
41
+
42
+ // The idiomatic way of enabling use of a custom type as the key of hash
43
+ // containers in C++11. This method removes the requirement of having to pass
44
+ // a custom hasher to std::unordered_{map, set}.
45
+ // See http://en.cppreference.com/w/cpp/utility/hash for more information.
46
+ namespace std {
47
+ template <>
48
+ struct hash<torch::autograd::Edge> {
49
+ // These type aliases are required by the standard.
50
+ using argument_type = torch::autograd::Edge;
51
+ using return_type = size_t;
52
+ return_type operator()(const argument_type& edge) const noexcept {
53
+ return c10::get_hash(edge.function, edge.input_nr);
54
+ }
55
+ };
56
+ } // namespace std
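A brief sketch (again, not part of the header): thanks to `operator==` and the `std::hash` specialization above, `Edge` works directly as a key in unordered containers. The helper below is hypothetical and only illustrates that property.

```cpp
#include <torch/csrc/autograd/edge.h>
#include <unordered_set>
#include <vector>

// Hypothetical helper: report whether any (function, input_nr) pair repeats.
bool has_duplicate_edges(const std::vector<torch::autograd::Edge>& edges) {
  std::unordered_set<torch::autograd::Edge> seen;
  for (const auto& e : edges) {
    if (!seen.insert(e).second) {
      return true;  // insert failed, so an equal edge was already present
    }
  }
  return false;
}
```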
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h ADDED
@@ -0,0 +1,763 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/anomaly_mode.h>
4
+ #include <torch/csrc/autograd/edge.h>
5
+ #include <torch/csrc/autograd/grad_mode.h>
6
+ #include <torch/csrc/autograd/graph_task.h>
7
+ #include <torch/csrc/autograd/input_metadata.h>
8
+ #include <torch/csrc/autograd/saved_variable.h>
9
+ #include <torch/csrc/autograd/variable.h>
10
+ #include <torch/csrc/utils/python_stub.h>
11
+ #include <torch/csrc/utils/variadic.h>
12
+
13
+ #include <ATen/SequenceNumber.h>
14
+ #include <ATen/core/Tensor.h>
15
+ #include <ATen/record_function.h>
16
+ #include <c10/util/Exception.h>
17
+ #include <c10/util/irange.h>
18
+
19
+ #include <algorithm>
20
+ #include <cstdint>
21
+ #include <initializer_list>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ namespace torch::autograd {
28
+
29
+ struct Edge;
30
+ struct FunctionPostHook;
31
+ struct FunctionPreHook;
32
+
33
+ using tensor_list = std::vector<at::Tensor>;
34
+ using variable_list = std::vector<Variable>;
35
+ using edge_list = std::vector<Edge>;
36
+ using saved_variable_list = std::vector<SavedVariable>;
37
+ using IndexRange = std::pair<size_t, size_t>;
38
+ using torch::dynamo::autograd::CompiledNodeArgs;
39
+ using torch::dynamo::autograd::SwapSavedVariables;
40
+
41
+ // Custom deleter to prevent stack overflows.
42
+ TORCH_API void deleteNode(Node* function);
43
+
44
+ // Guard that sets and restores the evaluating node
45
+ class NodeGuard {
46
+ public:
47
+ explicit NodeGuard(std::shared_ptr<Node> node);
48
+ ~NodeGuard();
49
+
50
+ private:
51
+ std::shared_ptr<Node> last_evaluating_node_;
52
+ };
53
+
54
+ // Return the Node currently being evaluated (if any)
55
+ // This is only set during the backward pass while a Node is being
56
+ // executed.
57
+ TORCH_API std::shared_ptr<Node> get_current_node();
58
+
59
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60
+ // Node
61
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
62
+ // A `Node` is an abstract class that represents an operation taking zero
63
+ // or more input `Variable`s and producing zero or more output `Variable`s. All
64
+ // functions in PyTorch's autograd machinery derive from this class and
65
+ // override its `apply` method. Instances of such subclasses will then be
66
+ // invokable via the call operator.
67
+ //
68
+ // Nodes in the Autograd Graph
69
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
70
+ // When viewing the autograd system as a graph, `Node`s are the vertices or
71
+ // nodes, connected to each other via (directed) `Edge`s, which themselves are
72
+ // represented via (`Node`, input_nr) pairs. `Variable`s are the outputs to
73
+ // and inputs of `Node`s, and travel between these edges during execution
74
+ // of the graph. When two or more `Edge`s (from different sources) point at the
75
+ // same input to a `Node`, the values produced along all of these edges are
76
+ // implicitly summed prior to being forwarded to the target `Node`.
77
+ //
78
+ // Hierarchy
79
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
80
+ // Subclasses usually represent differentiable functions as well as their
81
+ // gradient operators. Note, however, that due to the very general definition
82
+ // of a `Node` taking *zero* or more inputs and producing *zero* or more
83
+ // outputs, uses of `Node`s are flexible and extend beyond purely
84
+ // mathematical operations. For example, the `AccumulateGrad` function is a
85
+ // *sink*: it takes one input, but produces no outputs, instead accumulating
86
+ // the input as a side effect. At the other extreme, the `GraphRoot` function
87
+ // receives no inputs from other functions, but produces multiple outputs.
88
+ //
89
+ // Interface
90
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
91
+ // The most important method on `Node` is the call operator, which takes in
92
+ // a list of variables and produces a list of variables. The precise size of
93
+ // these lists can be determined with `num_inputs()` and `num_outputs()`.
94
+ // `Node`s are stitched together via their `next_edge` interface, which let
95
+ // you manipulate the set of outgoing edges of a `Node`. You can add an
96
+ // edge with `add_next_edge()`, retrieve an edge with `next_edge(index)` and
97
+ // iterate over them via the `next_edges()` method. Other methods exist for
98
+ // integration with the JIT and other parts of PyTorch. Every `Node` has a
99
+ // *sequence number* that increases monotonically in the order of `Node`
100
+ // construction. It can be retrieved via the `sequence_nr()` method. Note that
101
+ // this sequence number is *thread local*. This means that when `Node`s
102
+ // `A`, `B` and `C` are created consecutively in the same thread, their
103
+ // sequence numbers will be ordered `A` < `B` < `C`. If, however, `A` and `B`
104
+ // are created in one thread and `C` is created in a new thread, there are *no
105
+ // guarantees* w.r.t. the ordering of `C` relative to `A` or `B`.
106
+ // See NOTE [ Sequence Number] for more details on the usages of sequence
107
+ // number.
108
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
109
+ struct TORCH_API Node : std::enable_shared_from_this<Node> {
110
+ public:
111
+ /// Construct a new `Node` with the given `next_edges`
112
+ explicit Node(uint64_t sequence_nr, edge_list&& next_edges = edge_list())
113
+ : sequence_nr_(sequence_nr), next_edges_(std::move(next_edges)) {
114
+ for (const Edge& edge : next_edges_) {
115
+ update_topological_nr(edge);
116
+ }
117
+
118
+ if (AnomalyMode::is_enabled()) {
119
+ metadata()->store_stack();
120
+
121
+ // If anomaly mode is enabled and graph is constructed, then assign the
122
+ // currently evaluating node as the parent of this node.
123
+ // A parent is a Node where this Node is created.
124
+ // We are tracking the parents to track multiple backward operations.
125
+ assign_parent();
126
+ }
127
+
128
+ // Store the thread_id of the forward operator.
129
+ // See NOTE [ Sequence Numbers ]
130
+ thread_id_ = at::RecordFunction::currentThreadId();
131
+ }
132
+
133
+ explicit Node(edge_list&& next_edges = edge_list())
134
+ : Node(
135
+ /*sequence_nr=*/at::sequence_number::get_and_increment(),
136
+ std::move(next_edges)) {}
137
+
138
+ /// Nodes are neither copyable nor moveable.
139
+ Node(const Node& other) = delete;
140
+ Node(Node&& other) = delete;
141
+ Node& operator=(const Node& other) = delete;
142
+ Node& operator=(Node&& other) = delete;
143
+ virtual ~Node() = default;
144
+
145
+ std::shared_ptr<Node> getptr() {
146
+ return shared_from_this();
147
+ }
148
+ /// Evaluates the function on the given inputs and returns the result of the
149
+ /// function call.
150
+ variable_list operator()(variable_list&& inputs) {
151
+ // In the first iteration of named tensors, autograd ignores names and
152
+ // operates on unnamed tensors. In the long term, autograd should
153
+ // probably operate with names.
154
+ at::NoNamesGuard no_names_guard;
155
+
156
+ #ifdef USE_ROCM
157
+ // Keep track of backward pass for rocblas.
158
+ at::ROCmBackwardPassGuard in_backward;
159
+ #endif
160
+
161
+ auto step_callbacks =
162
+ at::getStepCallbacksUnlessEmpty(at::RecordScope::BACKWARD_FUNCTION);
163
+ if (C10_UNLIKELY(step_callbacks.has_value())) {
164
+ at::RecordFunction guard(std::move(*step_callbacks));
165
+ // Using sequence number and thread id to correlate with
166
+ // the forward pass function
167
+ guard.setForwardThreadId(thread_id_);
168
+ if (guard.needsInputs()) {
169
+ std::vector<c10::IValue> inputs_vec(inputs.begin(), inputs.end());
170
+ guard.before(
171
+ name(),
172
+ c10::ArrayRef<const c10::IValue>(
173
+ inputs_vec.data(), inputs_vec.size()),
174
+ static_cast<int64_t>(sequence_nr()));
175
+ } else {
176
+ guard.before(name(), static_cast<int64_t>(sequence_nr()));
177
+ }
178
+ return apply(std::move(inputs));
179
+ } else {
180
+ return apply(std::move(inputs));
181
+ }
182
+ }
183
+
184
+ // Graph Connectivity API
185
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
186
+
187
+ // Inputs. NOTE: inputs of the grad_fn correspond to Tensor outputs of the
188
+ // forward function.
189
+
190
+ // Marker for expected undefined input
191
+ struct undefined_input {};
192
+
193
+ /// Adds the type and shape metadata for a new input. Returns the index of
194
+ /// the new input.
195
+ uint32_t add_input_metadata(
196
+ const at::TensorOptions& options,
197
+ c10::SymIntArrayRef shape,
198
+ bool is_tensor_subclass,
199
+ bool is_nested) noexcept {
200
+ uint32_t input_nr = input_metadata_.size();
201
+ auto meta_shape = MetadataShape{std::in_place_type<SymIntSmallVec>, shape};
202
+ input_metadata_.emplace_back(
203
+ options, meta_shape, is_tensor_subclass, is_nested);
204
+ return input_nr;
205
+ }
206
+
207
+ uint32_t add_input_metadata(const at::Tensor& t) noexcept {
208
+ uint32_t input_nr = input_metadata_.size();
209
+ input_metadata_.emplace_back(t);
210
+ return input_nr;
211
+ }
212
+
213
+ /// Adds a placeholder for an input that will not be used.
214
+ uint32_t add_input_metadata(undefined_input u) noexcept {
215
+ uint32_t input_nr = input_metadata_.size();
216
+ input_metadata_.emplace_back();
217
+ return input_nr;
218
+ }
219
+
220
+ uint32_t num_inputs() const noexcept {
221
+ return input_metadata_.size();
222
+ }
223
+
224
+ const InputMetadata& input_metadata(size_t index) const {
225
+ return input_metadata_[index];
226
+ }
227
+
228
+ // Danger: not thread safe, caller must protect with lock
229
+ InputMetadata& mutable_input_metadata(size_t index) {
230
+ return input_metadata_[index];
231
+ }
232
+
233
+ /**
234
+ * Note: Function Streams
235
+ * A function's stream (for a given device type) is the stream of the first
236
+ * element of its input buffer on a device of that type.
237
+ *
238
+ * If all elements are on the same device they MUST share a stream. If
239
+ * elements are on different devices (across multiple GPUs, for example)
240
+ * they may have different streams.
241
+ */
242
+ c10::optional<c10::Stream> stream() {
243
+ auto opt_device_type = at::getAccelerator();
244
+ if (!opt_device_type.has_value()) {
245
+ return c10::nullopt;
246
+ }
247
+ for (const auto& metadata : input_metadata_) {
248
+ if (metadata.device().type() == opt_device_type.value())
249
+ return metadata.stream();
250
+ }
251
+
252
+ return c10::nullopt;
253
+ }
254
+
255
+ void clear_input_metadata() {
256
+ input_metadata_.clear();
257
+ }
258
+
259
+ // Outputs ("Next Edges")
260
+
261
+ void update_topological_nr(const Edge& edge) {
262
+ TORCH_INTERNAL_ASSERT(
263
+ !has_parent_,
264
+ "Cannot update a node's topological_nr after it already has a parent."
265
+ " If we allow this, we can no longer guarantee that a parent's"
266
+ " topo_nr is always greater than those of all its children")
267
+ Node* node = edge.function.get();
268
+ if (node) {
269
+ auto topo_nr = node->topological_nr();
270
+ if (topological_nr_ <= topo_nr) {
271
+ topological_nr_ = topo_nr + 1;
272
+ }
273
+ }
274
+ }
275
+
276
+ void set_next_edge(size_t index, Edge edge) {
277
+ update_topological_nr(edge);
278
+ next_edges_[index] = std::move(edge);
279
+ }
280
+
281
+ void add_next_edge(Edge edge) {
282
+ update_topological_nr(edge);
283
+ next_edges_.emplace_back(std::move(edge));
284
+ }
285
+
286
+ void set_next_edges(edge_list&& next_edges) {
287
+ next_edges_ = std::move(next_edges);
288
+ for (const auto& next_edge : next_edges_) {
289
+ update_topological_nr(next_edge);
290
+ }
291
+ }
292
+
293
+ const Edge& next_edge(size_t index) const noexcept {
294
+ return next_edges_[index];
295
+ }
296
+
297
+ const edge_list& next_edges() const noexcept {
298
+ return next_edges_;
299
+ }
300
+
301
+ edge_list& next_edges() noexcept {
302
+ return next_edges_;
303
+ }
304
+
305
+ uint32_t num_outputs() const noexcept {
306
+ return next_edges_.size();
307
+ }
308
+
309
+ // Miscellaneous Methods
310
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
311
+
312
+ /// NOTE [ Sequence Number]
313
+ ///
314
+ /// The sequence_nr has two main usages in autograd:
315
+ ///
316
+ /// 1) Helps determine the node's execution priority in the engine.
317
+ /// All else being equal, nodes with higher priority numbers are executed
318
+ /// first. Thus, nodes corresponding to ops executed later are the first to
319
+ /// be executed in the backward pass. One caveat is that we prioritize
320
+ /// AccumulateGrad nodes by explicitly setting its sequence_nr to be
321
+ /// UINT64_MAX.
322
+ /// 2) The sequence number of this `Node` is paired with the thread_id it was
323
+ /// created in
324
+ /// as a unique identifier by the profiler to annotate recorded events.
325
+ /// The purpose of this is to help users (and possibly programs)
326
+ /// interpreting the profiler's output to correlate backward nodes with its
327
+ /// forward ops. We need both sequence_nr and thread_id to identify a node
328
+ /// because sequence_nr is thread_local, i.e., starts counting up from zero
329
+ /// in a new thread
330
+ uint64_t sequence_nr() const noexcept {
331
+ return sequence_nr_;
332
+ }
333
+
334
+ void set_sequence_nr(uint64_t sequence_nr) {
335
+ sequence_nr_ = sequence_nr;
336
+ }
337
+
338
+ // NOTE [ Topological Number ]
339
+ //
340
+ // topological_nr is used to prune branches in the DAG during autograd
341
+ // discovery as maintaining topological_nr helps us check in O(1) if there
342
+ // does NOT exist a directed path between two nodes.
343
+ //
344
+ // The topological order number of this `Node` represents the length of the
345
+ // longest possible path from this Node to any leaf node. If you are a leaf
346
+ // node, aka AccumulateGrad, this will be zero. This value has the property
347
+ // that for every pair of nodes X, Y in G, existence of a directed path from X
348
+ // to Y implies topo_nr(X) > topo_nr(Y). The converse is not true, however, so
349
+ // we cannot prove existence of a path from X to Y, only non-existence.
350
+ //
351
+ // One assumption we make when using topo_nr is that once a node
352
+ // has been used, i.e., has a parent node, its own topo_nr does not change;
353
+ // we have added some checks with the `has_parent_` field to enforce this.
354
+ //
355
+ // What NOT to do:
356
+ //
357
+ // 1) 2 -> 1 -> 0 In this diagram we label nodes with their
358
+ // topo_nr.
359
+ // 2 -> 1 -> 0 We have two simple graphs that can each
360
+ // arise from
361
+ // `t.exp().exp()`, for example.
362
+ // 2) 2 -> 1 -> 0
363
+ // /
364
+ // 2 -> 1 -> 0 We add 2 as a next edge to 1 even though 1
365
+ // already
366
+ // has a parent.
367
+ // 3) 2 -> 1 -> 0
368
+ // /
369
+ // 2 -> 3 -> 0 2 < 3, yet there exists a path from 2 to 3!
370
+ //
371
+ uint64_t topological_nr() const noexcept {
372
+ has_parent_ = true;
373
+ return topological_nr_;
374
+ }
375
+
376
+ // assigning a node as a parent to this node
377
+ void assign_parent();
378
+
379
+ /// Id of the thread that created Node
380
+ uint64_t thread_id() const noexcept {
381
+ return thread_id_;
382
+ }
383
+
384
+ /// Returns the name of the dynamic type of the function, for debugging.
385
+ virtual std::string name() const;
386
+
387
+ /// The difference between functions `should_compute_output` and
388
+ /// `task_should_compute_output`:
389
+ /// - `should_compute_output` should only be used during graph construction
390
+ /// and takes into account only requires_grad information
391
+ /// - `task_should_compute_output` should only be called during the backward
392
+ /// pass (unless called directly through grad_fn) and takes into account the
393
+ /// current graph task. Specifically, the autograd engine trims unnecessary
394
+ /// edges when `inputs` are specified, and during backward untrimmed nodes
395
+ /// left on the graph can/should check `task_should_compute_output` to see if
396
+ /// any outgoing edges have been trimmed by the engine. If that is the case,
397
+ /// gradient computation wrt those edges can be omitted.
398
+ ///
399
+ /// Returns true if the particular output edge is active, and that particular
400
+ /// output of this function should be computed.
401
+ bool should_compute_output(size_t output_edge_index) const {
402
+ TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
403
+ return next_edges_[output_edge_index].is_valid();
404
+ }
405
+
406
+ /// Returns true if any of the output edges in any of the ranges are active.
407
+ bool should_compute_output(std::initializer_list<IndexRange> idxs) const {
408
+ return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
409
+ for (const auto i : c10::irange(range.first, range.second)) {
410
+ if (should_compute_output(i))
411
+ return true;
412
+ }
413
+ return false;
414
+ });
415
+ }
416
+
417
+ /// Same as the above `should_compute_output` function but will also
418
+ /// check whether this edge is needed within the current graph task.
419
+ bool task_should_compute_output(size_t output_edge_index) const {
420
+ TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
421
+ const auto& next = next_edges_[output_edge_index];
422
+ if (next.is_valid()) {
423
+ const auto exec_info = get_current_graph_task_exec_info();
424
+ if (exec_info && !exec_info->empty()) {
425
+ auto it = exec_info->find(next.function.get());
426
+ if (it == exec_info->end() || !it->second.should_execute()) {
427
+ return false; // this edge is not needed for the current graph_task
428
+ }
429
+ }
430
+ return true;
431
+ }
432
+ return false;
433
+ }
434
+
435
+ /// Returns true if any of the output edges in any of the ranges are active
436
+ /// and should be computed in the current graph task.
437
+ bool task_should_compute_output(
438
+ std::initializer_list<IndexRange> idxs) const {
439
+ return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
440
+ for (const auto i : c10::irange(range.first, range.second)) {
441
+ if (task_should_compute_output(i))
442
+ return true;
443
+ }
444
+ return false;
445
+ });
446
+ }
447
+
448
+ /// Returns the `PyObject` stored for this `Node` (for Python
449
+ /// interaction).
450
+ PyObject* pyobj() const noexcept {
451
+ return pyobj_;
452
+ }
453
+
454
+ /// Sets the `PyObject` stored for this `Node` (for Python interaction).
455
+ void set_pyobj(PyObject* pyobj) noexcept {
456
+ pyobj_ = pyobj;
457
+ }
458
+
459
+ /// Returns the anomaly metadata stored for this `Node`.
460
+ /// If none exist, creates a new empty one.
461
+ AnomalyMetadata* metadata() noexcept;
462
+
463
+ // Hook API
464
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
465
+
466
+ uintptr_t add_post_hook(std::unique_ptr<FunctionPostHook>&& post_hook) {
467
+ post_hooks_.emplace_back(std::move(post_hook));
468
+ // Use the raw pointer as the unique key to identify this hook. This key
469
+ // can then be used in del_post_hook(key) to remove this hook.
470
+ return reinterpret_cast<std::uintptr_t>(post_hooks_.back().get());
471
+ }
472
+
473
+ const std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks()
474
+ const noexcept {
475
+ return post_hooks_;
476
+ }
477
+
478
+ // delete a post hook matching the key
479
+ bool del_post_hook(const uintptr_t& key) {
480
+ for (auto it = post_hooks_.begin(); it != post_hooks_.end(); ++it) {
481
+ if (key == reinterpret_cast<std::uintptr_t>(it->get())) {
482
+ post_hooks_.erase(it);
483
+ return true;
484
+ }
485
+ }
486
+ return false;
487
+ }
488
+
489
+ std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks() noexcept {
490
+ return post_hooks_;
491
+ }
492
+
493
+ void add_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
494
+ pre_hooks_.emplace_back(std::move(pre_hook));
495
+ }
496
+
497
+ void add_tensor_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
498
+ tensor_pre_hooks_.emplace_back(std::move(pre_hook));
499
+ }
500
+
501
+ void add_retains_grad_hook(
502
+ std::unique_ptr<FunctionPreHook>&& pre_hook,
503
+ size_t output_idx) {
504
+ retains_grad_hooks_[output_idx] = std::move(pre_hook);
505
+ }
506
+
507
+ std::unique_ptr<FunctionPreHook> pop_retains_grad_hook(size_t output_idx) {
508
+ auto ret = std::move(retains_grad_hooks_[output_idx]);
509
+ retains_grad_hooks_.erase(output_idx);
510
+ return ret;
511
+ }
512
+
513
+ const std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks()
514
+ const noexcept {
515
+ return pre_hooks_;
516
+ }
517
+
518
+ std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks() noexcept {
519
+ return pre_hooks_;
520
+ }
521
+
522
+ virtual std::vector<std::unique_ptr<FunctionPreHook>>&
523
+ tensor_pre_hooks() noexcept {
524
+ return tensor_pre_hooks_;
525
+ }
526
+
527
+ virtual std::unique_ptr<PostAccumulateGradHook>&
528
+ tensor_post_acc_grad_hooks() noexcept {
529
+ static std::unique_ptr<PostAccumulateGradHook> empty = nullptr;
530
+ return empty;
531
+ }
532
+
533
+ std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>&
534
+ retains_grad_hooks() noexcept {
535
+ return retains_grad_hooks_;
536
+ }
537
+
538
+ // Customization Points for Subclasses
539
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
540
+
541
+ /// Releases saved variables if the operation won't be reused.
542
+ virtual void release_variables() {}
543
+
544
+ /// Called before an apply if `release_variables()` is going to be called.
545
+ /// Allows larger ops like `InterpreterAutogradFunction` to incrementally
546
+ /// release variables as they run.
547
+ virtual void will_release_variables() {}
548
+
549
+ /// Returns true if this function is traceable. An op is traceable if all
550
+ /// operations happening within `apply()` are performed on autograd
551
+ /// `Variables` (i.e. apply mostly instantiates and applies other functions).
552
+ virtual bool is_traceable() {
553
+ return false;
554
+ }
555
+
556
+ /// A `Node` is said to pass state transparently to backward, if the
557
+ /// state consists only of (Saved)Variables and only non-variable objects
558
+ /// that parameterize the operation in some way that defines the graph
559
+ /// structure AND the backward function is traceable. In particular,
560
+ /// parametrization MUST NOT depend on the data of any `Variable`.
561
+ /// TODO: it might be possible to handle cases where backward is
562
+ /// non-traceable but state passing could be considered transparent. This
563
+ /// will probably depend on saved_variable_list being mutable.
564
+ /// NOTE: this value matters only if is_traceable() returns false.
565
+ virtual bool passes_state_transparently() {
566
+ return false;
567
+ }
568
+
569
+ // see [Note: Compiled Autograd]
570
+ // Used by compiled autograd to
571
+ // 1) Extract tensors/symint args
572
+ // 2) Collect node information for specialization and caching
573
+ // Implementations in subclasses should call args.collect() with all node
574
+ // attrs. These functions are only called during backward.
575
+ virtual void compiled_args(CompiledNodeArgs& args) {
576
+ throw std::runtime_error(
577
+ std::string("compiled_args not implemented: ") + name());
578
+ }
579
+
580
+ // Used by compiled autograd to call apply() with different saved tensors
581
+ // Implementations should call saved.before() on all attrs, then apply(), then
582
+ // saved.after() on all attrs in the same order.
583
+ virtual variable_list apply_with_saved(
584
+ const variable_list& inputs,
585
+ SwapSavedVariables& saved) {
586
+ throw std::runtime_error(
587
+ std::string("apply_with_saved not implemented: ") + name());
588
+ }
589
+
590
+ protected:
591
+ /// Performs the `Node`'s actual operation.
592
+ virtual variable_list apply(variable_list&& inputs) = 0;
593
+
594
+ /// Calls `apply()`, but instruments it with tracing machinery.
595
+ variable_list traced_apply(variable_list inputs);
596
+
597
+ // Sequence number used to correlate backward nodes with forward ops in the
598
+ // profiler and provide determinism in the engine.
599
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
600
+ uint64_t sequence_nr_;
601
+
602
+ // See NOTE [ Topological Number ]
603
+ uint64_t topological_nr_ = 0;
604
+
605
+ // Tracks whether this node has been added as the next_edge of another node
606
+ // via set_next_edge(s), which always calls topological_nr() of all its
607
+ // children See NOTE [ Topological Number ] for why we need this.
608
+ mutable bool has_parent_ = false;
609
+
610
+ // Id of the thread that created the instance
611
+ uint64_t thread_id_ = 0;
612
+
613
+ // Note [Thread Safety on Autograd Node]
614
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
615
+ // The Autograd Engine lets the owning thread, the one that calls Engine::execute, drive
616
+ // the GraphTask execution. There might be cases where part of the GraphTask is
617
+ // shared across different `backward()` or `grad()` calls, i.e. fork new
618
+ // threads in the middle of the forward and call `backward()` separately from
619
+ // different threads. We need to protect the thread safety on NodeTask to
620
+ // prevent data racing on shared variables read/write.
621
+ //
622
+ // NB: This is only needed for Autograd Nodes that runs on CPU, technically
623
+ // "CUDA", "XLA" nodes don't need locking because device threads are always
624
+ // single threaded.
625
+ //
626
+ // Here we add a thread mutex to help protect the Node's thread safety, so
627
+ // that different threads cannot race the shared data when executing the same
628
+ // NodeTask from multiple CPU threads. It IS the user/developer responsibility
629
+ // to take advantage of this mutex to protect the thread safety of their
630
+ // autograd Node. The general strategy of thread safety on autograd Node:
631
+ //
632
+ // 1. User should lock the mutex during Node::release_variables() if the Node
633
+ // needs
634
+ // to release the variables on the fly; this serves the purpose that when we
635
+ // release saved_variables from one thread, no other threads can release
636
+ // the saved variables concurrently.
637
+ // 2. User should lock the mutex during Node::apply(); this is to ensure that
638
+ // Nodes
639
+ // writing to the shared variable are not racing across threads (i.e.
640
+ // AccumulateGrad and custom C++ Autograd Nodes writing to shared
641
+ // variables )
642
+ // 3. item 2 and item 3 should work together so that when we release saved
643
+ // variables
644
+ // from one thread, no other threads can call Node::apply(); this ensures
645
+ // the variable references from other threads aren't dangling.
646
+ // 4. If the Node doesn't release any variables and there is no shared data read/write in
647
+ // the Node
648
+ // (i.e. it is purely functional), the user doesn't need to lock the mutex
649
+ //
650
+ // This way we could protect the thread safety on Autograd Node, but we could
651
+ // still not protect the thread safety on Node pre/post C++ hooks (python
652
+ // hooks are automatically thread safe), we rely on the user to write thread
653
+ // safe C++ hooks if they want the hook to be correctly applied in
654
+ // multithreading environment.
655
+ std::mutex mutex_;
656
+
657
+ edge_list next_edges_;
658
+ PyObject* pyobj_ = nullptr; // weak reference
659
+ std::unique_ptr<AnomalyMetadata> anomaly_metadata_ = nullptr;
660
+
661
+ // NOTE [Hooks ordering]
+ // We have 3 separate fields for pre-hooks registered to autograd nodes
+ // because the conditions under which they execute differ, and we want
+ // fine-grained control over the order in which the different types of hooks
+ // run:
+ // - pre_hooks_ are executed only when the node itself is executed.
+ // - tensor_pre_hooks_ are executed whenever the engine traverses the node,
+ //   even if that node won't be executed.
+ // - retains_grad_hooks_ are like tensor pre-hooks, except they are always
+ //   ordered after all other tensor pre-hooks.
671
+ std::vector<std::unique_ptr<FunctionPreHook>> pre_hooks_;
672
+ std::vector<std::unique_ptr<FunctionPreHook>> tensor_pre_hooks_;
673
+ std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>
674
+ retains_grad_hooks_;
675
+ std::vector<std::unique_ptr<FunctionPostHook>> post_hooks_;
676
+ at::SmallVector<InputMetadata, 2> input_metadata_;
677
+ };
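
A minimal sketch (not part of the header above) of a Node subclass that follows the locking protocol from NOTE [Thread Safety on Autograd Node]. MyAccumulateNode and shared_buffer_ are illustrative names, and the sketch assumes the protected mutex_ member declared above is visible to derived nodes.

    // Illustrative only: a node with shared state guarded by the Node mutex.
    struct MyAccumulateNode : public torch::autograd::Node {
      torch::autograd::variable_list apply(
          torch::autograd::variable_list&& grads) override {
        // Item 2 of the note above: guard writes to shared state during apply().
        std::lock_guard<std::mutex> lock(mutex_);
        if (!grads.empty() && grads[0].defined()) {
          shared_buffer_ = shared_buffer_.defined() ? shared_buffer_ + grads[0]
                                                    : grads[0];
        }
        return {};
      }

      void release_variables() override {
        // Item 1 of the note above: releasing saved state cannot race with apply().
        std::lock_guard<std::mutex> lock(mutex_);
        shared_buffer_.reset();
      }

      at::Tensor shared_buffer_;  // illustrative shared state
    };
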
678
+
679
+ /// See Node::is_traceable() for definition.
680
+ struct TraceableFunction : public Node {
681
+ using Node::Node;
682
+ bool is_traceable() final {
683
+ return true;
684
+ }
685
+ };
686
+
687
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
688
+ // Associated Free Nodes
689
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
690
+
691
+ namespace detail {
692
+ // Implementation of `collect_next_edges` (see below).
693
+ struct MakeNextFunctionList : IterArgs<MakeNextFunctionList> {
694
+ edge_list next_edges;
695
+ using IterArgs<MakeNextFunctionList>::operator();
696
+ void operator()(const Variable& variable) {
697
+ if (variable.defined()) {
698
+ next_edges.emplace_back(impl::gradient_edge(variable));
699
+ } else {
700
+ next_edges.emplace_back();
701
+ }
702
+ }
703
+ void operator()(const Variable* variable) {
704
+ operator()(*variable);
705
+ }
706
+ void operator()(const c10::optional<Variable>& variable) {
707
+ if (variable.has_value()) {
708
+ operator()(*variable);
709
+ } else {
710
+ next_edges.emplace_back();
711
+ }
712
+ }
713
+ };
714
+ } // namespace detail
715
+
716
+ /// Create an `Edge` between the given `variable` and the `function`, which is
717
+ /// assumed to be the gradient function of this variable (i.e. the function
718
+ /// through which this variable is backpropagated during the backward pass).
719
+ /// This sets the `grad_fn` property of the `variable`. This function assumes
720
+ /// that the `Variable` is a new input to the gradient function and its
721
+ /// `input_nr` thus equal to `function->num_inputs()`. Additionally, it
722
+ /// increments the `Node`'s number of inputs by one. Approximately
723
+ /// equivalent to `variable.set_gradient_edge(function,
724
+ /// function->add_input_metadata(variable.dispatch_type(), variable.sizes()))`.
725
+ /// If you don't want the `Node`'s `num_inputs` to be incremented, use
726
+ /// `set_gradient_edge` directly.
727
+ inline void create_gradient_edge(
728
+ Variable& variable,
729
+ std::shared_ptr<Node> function) {
730
+ // Copy before move.
731
+ const auto input_nr = function->add_input_metadata(variable);
732
+ impl::set_gradient_edge(variable, {std::move(function), input_nr});
733
+ }
734
+
735
+ /// Return true if any of the variables in the list require a gradient.
736
+ inline bool any_variable_requires_grad(const variable_list& variables) {
737
+ return std::any_of(
738
+ variables.begin(), variables.end(), [](const Variable& variable) {
739
+ return variable.defined() && variable.requires_grad();
740
+ });
741
+ }
742
+
743
+ /// Return the next edges of all the given variables, or tuples of variables.
744
+ template <typename... Variables>
745
+ edge_list collect_next_edges(Variables&&... variables) {
746
+ detail::MakeNextFunctionList make;
747
+ make.apply(std::forward<Variables>(variables)...);
748
+ return std::move(make.next_edges);
749
+ }
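
A sketch of how forward-side code typically uses collect_next_edges together with create_gradient_edge defined above. MyMulBackward is a hypothetical Node subclass (its apply() is not shown), and set_next_edges is the Node method referenced in the comments earlier in this header.

    // Illustrative wiring of a hypothetical op into the autograd graph.
    at::Tensor my_mul(const at::Tensor& a, const at::Tensor& b) {
      std::shared_ptr<MyMulBackward> grad_fn;
      if (torch::autograd::any_variable_requires_grad({a, b})) {
        grad_fn = std::make_shared<MyMulBackward>();
        // Hook the new node up to the grad_fns of its inputs.
        grad_fn->set_next_edges(torch::autograd::collect_next_edges(a, b));
      }
      auto result = a * b;
      if (grad_fn) {
        // Make `result` report `grad_fn` as its gradient function.
        torch::autograd::create_gradient_edge(result, grad_fn);
      }
      return result;
    }
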
750
+
751
+ struct TypeAndSize {
752
+ TypeAndSize() : options(at::TensorOptions()) {}
753
+ /* implicit */
754
+ TypeAndSize(const at::Tensor& t)
755
+ : sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
756
+
757
+ at::Tensor zeros();
758
+
759
+ std::vector<c10::SymInt> sym_sizes;
760
+ at::TensorOptions options;
761
+ };
762
+
763
+ } // namespace torch::autograd
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h ADDED
@@ -0,0 +1,64 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <string>
6
+ #include <vector>
7
+
8
+ namespace torch::dynamo::autograd {
9
+ class CompiledNodeArgs;
10
+ class SwapSavedVariables;
11
+ } // namespace torch::dynamo::autograd
12
+
13
+ // A hook that's called on gradients
14
+
15
+ namespace torch::autograd {
16
+
17
+ using Variable = at::Tensor;
18
+ using variable_list = std::vector<Variable>;
19
+
20
+ struct TORCH_API FunctionPreHook {
21
+ virtual ~FunctionPreHook() = default;
22
+ virtual variable_list operator()(const variable_list& grads) = 0;
23
+ // only implemented for python hooks, registers hook with compiled autograd
24
+ virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
25
+ throw std::runtime_error(
26
+ std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
27
+ typeid(*this).name());
28
+ }
29
+ };
30
+
31
+ struct TORCH_API FunctionPostHook {
32
+ virtual ~FunctionPostHook() = default;
33
+ virtual variable_list operator()(
34
+ const variable_list& outputs /* grad_inputs */,
35
+ const variable_list& inputs /* grad_outputs */) = 0;
36
+ // only implemented for python hooks, registers hook with compiled autograd
37
+ virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
38
+ throw std::runtime_error(
39
+ std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
40
+ typeid(*this).name());
41
+ }
42
+ };
43
+
44
+ struct TORCH_API PostAccumulateGradHook {
45
+ virtual ~PostAccumulateGradHook() = default;
46
+ virtual void operator()(const Variable& tensor) = 0;
47
+ // only implemented for python hooks on nodes, registers hook with compiled
48
+ // autograd
49
+ virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
50
+ throw std::runtime_error(
51
+ std::string("not yet implemented for compiled autograd: ") +
52
+ typeid(*this).name());
53
+ }
54
+
55
+ virtual void apply_with_saved(
56
+ Variable&,
57
+ torch::dynamo::autograd::SwapSavedVariables&) {
58
+ throw std::runtime_error(
59
+ std::string("not yet implemented for compiled autograd: ") +
60
+ typeid(*this).name());
61
+ }
62
+ };
63
+
64
+ } // namespace torch::autograd
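
As an illustration of the FunctionPreHook interface declared above, here is a minimal sketch (not part of this header) of a C++ pre-hook that scales incoming gradients; GradScaleHook is an illustrative name, and the default compiled_args() is left untouched.

    struct GradScaleHook : torch::autograd::FunctionPreHook {
      explicit GradScaleHook(double scale) : scale_(scale) {}

      torch::autograd::variable_list operator()(
          const torch::autograd::variable_list& grads) override {
        torch::autograd::variable_list out;
        out.reserve(grads.size());
        for (const auto& g : grads) {
          // Undefined grads are passed through untouched.
          out.push_back(g.defined() ? g * scale_ : g);
        }
        return out;
      }

      double scale_;
    };
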
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/grad_mode.h>
4
+ #include <torch/csrc/Export.h>
5
+
6
+ namespace torch::autograd {
7
+
8
+ using GradMode = at::GradMode;
9
+ using AutoGradMode = at::AutoGradMode;
10
+
11
+ } // namespace torch::autograd
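
A usage sketch for the aliases above, the C++ counterpart of torch.no_grad(); `model` and `x` are placeholders.

    {
      // Disable gradient recording for this scope.
      torch::autograd::AutoGradMode no_grad_guard(/*enabled=*/false);
      auto y = model.forward(x);  // ops here do not record a backward graph
    }
    // GradMode::is_enabled() is restored to its previous value on scope exit.
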
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h ADDED
@@ -0,0 +1,113 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ExpandUtils.h>
4
+ #include <ATen/NestedTensorImpl.h>
5
+ #include <ATen/core/Tensor.h>
6
+ #include <c10/core/Device.h>
7
+ #include <c10/core/DeviceType.h>
8
+ #include <c10/core/Stream.h>
9
+ #include <c10/core/SymIntArrayRef.h>
10
+ #include <c10/core/TensorImpl.h>
11
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
12
+ #include <c10/util/DimVector.h>
13
+ #include <c10/util/Exception.h>
14
+ #include <c10/util/SmallVector.h>
15
+
16
+ #ifndef AT_PER_OPERATOR_HEADERS
17
+ #include <ATen/Functions.h>
18
+ #else
19
+ #include <ATen/ops/zeros.h>
20
+ #endif
21
+
22
+ namespace torch::autograd {
23
+
24
+ using SymIntSmallVec = c10::SmallVector<c10::SymInt, c10::kDimVectorStaticSize>;
25
+ using MetadataShape = std::variant<SymIntSmallVec, at::Tensor>;
26
+
27
+ /**
28
+ * Records TensorOptions, shape of the tensor, whether or not the Python
29
+ * dispatch key is set (tensor subclass), and, where applicable, the stream the
30
+ * corresponding operation took place on.
31
+ *
32
+ * If is_valid() is false, then the corresponding input is not used and may be
33
+ * an undefined tensor.
34
+ */
35
+ struct TORCH_API InputMetadata {
36
+ InputMetadata() = default;
37
+ InputMetadata(
38
+ const at::TensorOptions& options,
39
+ MetadataShape input_shape,
40
+ bool is_tensor_subclass,
41
+ bool is_nested);
42
+ InputMetadata(const at::Tensor& t);
43
+
44
+ const at::TensorOptions& options() const {
45
+ return options_;
46
+ }
47
+
48
+ caffe2::TypeMeta dtype() const {
49
+ return options_.dtype();
50
+ }
51
+
52
+ at::Device device() const {
53
+ return options_.device();
54
+ }
55
+
56
+ at::Layout layout() const {
57
+ return options_.layout();
58
+ }
59
+
60
+ c10::Stream stream() const {
61
+ return stream_;
62
+ }
63
+
64
+ bool is_tensor_subclass() const {
65
+ return is_tensor_subclass_;
66
+ }
67
+
68
+ at::Tensor zeros_like() const;
69
+
70
+ bool is_same_shape(const at::Tensor& grad) const;
71
+
72
+ bool is_expandable_to_shape(const at::Tensor& grad) const;
73
+
74
+ at::Tensor reduce_grad(at::Tensor& grad) const;
75
+
76
+ at::Tensor maybe_reduce(
77
+ const size_t index,
78
+ at::Tensor grad,
79
+ const std::function<std::string(const std::string&)>& format_error) const;
80
+
81
+ std::stringstream incompatible_shape_error_message(
82
+ const size_t index,
83
+ const at::Tensor& grad) const;
84
+
85
+ bool was_default_constructed() const {
86
+ return was_default_constructed_;
87
+ }
88
+
89
+ bool is_cpp_nested_tensor() const;
90
+
91
+ bool is_nested_tensor() const {
92
+ return is_nested_;
93
+ }
94
+
95
+ c10::SymIntArrayRef shape_as_dim_vector() const;
96
+
97
+ // Danger: not thread safe, caller must protect with lock
98
+ SymIntSmallVec& mutable_shape_as_dim_vector();
99
+
100
+ private:
101
+ at::Tensor shape_as_tensor() const;
102
+ bool is_nestedness_same(const at::Tensor& grad) const;
103
+ bool maybe_expandable_to(const at::Tensor& grad) const;
104
+
105
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
106
+ const at::TensorOptions options_;
107
+ MetadataShape shape_;
108
+ c10::Stream stream_ = c10::Stream(c10::Stream::Default::DEFAULT, device());
109
+ bool is_tensor_subclass_ = false;
110
+ bool is_nested_ = false;
111
+ bool was_default_constructed_ = true;
112
+ };
113
+ } // namespace torch::autograd
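
A small sketch of how engine-side code might use InputMetadata to validate an incoming gradient; `input` and `grad` are placeholder tensors and the error messages are illustrative (real engine code goes through maybe_reduce()).

    torch::autograd::InputMetadata meta(input);  // records options, shape, stream
    TORCH_CHECK(grad.options().dtype() == meta.dtype(),
                "grad dtype mismatch for input 0");
    TORCH_CHECK(meta.is_same_shape(grad) || meta.is_expandable_to_shape(grad),
                meta.incompatible_shape_error_message(/*index=*/0, grad).str());
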
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch::autograd::profiler::python_tracer {
4
+
5
+ void init();
6
+
7
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h ADDED
@@ -0,0 +1,105 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+ #include <memory>
5
+ #include <typeinfo>
6
+
7
+ #include <torch/csrc/Exceptions.h>
8
+ #include <torch/csrc/autograd/function.h>
9
+ #include <torch/csrc/utils/object_ptr.h>
10
+
11
+ namespace torch::autograd {
12
+
13
+ struct THPCppFunction {
14
+ PyObject_HEAD std::shared_ptr<Node> cdata;
15
+ };
16
+
17
+ template <typename Ctor>
18
+ PyObject* CppFunction_pynew(
19
+ PyTypeObject* type,
20
+ PyObject* args,
21
+ PyObject* kwds) {
22
+ THPObjectPtr obj(type->tp_alloc(type, 0));
23
+ if (!obj)
24
+ return nullptr;
25
+ THPCppFunction* f = (THPCppFunction*)obj.get();
26
+ HANDLE_TH_ERRORS
27
+ new (&f->cdata) std::shared_ptr<Node>(Ctor()(args));
28
+ END_HANDLE_TH_ERRORS
29
+ if (!f->cdata) {
30
+ return nullptr;
31
+ }
32
+ return obj.release();
33
+ }
34
+
35
+ #define THP_FUNCTION_DEFAULT_METHODS \
36
+ {(char*)"_register_hook_dict", \
37
+ THPCppFunction_register_hook_dict, \
38
+ METH_O, \
39
+ nullptr}, \
40
+ {(char*)"register_hook", THPCppFunction_register_hook, METH_O, nullptr}, \
41
+ {(char*)"register_prehook", \
42
+ THPCppFunction_register_prehook, \
43
+ METH_O, \
44
+ nullptr}, \
45
+ {(char*)"name", THPCppFunction_name, METH_NOARGS, nullptr}, \
46
+ {(char*)"_sequence_nr", \
47
+ THPCppFunction_sequence_nr, \
48
+ METH_NOARGS, \
49
+ nullptr}, \
50
+ { \
51
+ (char*)"_set_sequence_nr", THPCppFunction_set_sequence_nr, METH_O, nullptr \
52
+ }
53
+
54
+ #define THP_FUNCTION_DEFAULT_PROPERTIES \
55
+ {(char*)"next_functions", \
56
+ THPCppFunction_next_functions, \
57
+ nullptr, \
58
+ nullptr, \
59
+ nullptr}, \
60
+ {(char*)"requires_grad", \
61
+ THPCppFunction_requires_grad, \
62
+ nullptr, \
63
+ nullptr, \
64
+ nullptr}, \
65
+ { \
66
+ (char*)"metadata", THPCppFunction_metadata, nullptr, nullptr, nullptr \
67
+ }
68
+
69
+ PyObject* THPCppFunction_next_functions(PyObject* self, void* _unused);
70
+ PyObject* THPCppFunction_metadata(PyObject* self, void* _unused);
71
+ PyObject* THPCppFunction_requires_grad(PyObject* self, void* _unused);
72
+ PyObject* THPCppFunction_register_hook_dict(PyObject* self, PyObject* _var);
73
+ PyObject* THPCppFunction_register_hook(PyObject* self, PyObject* hook);
74
+ PyObject* THPCppFunction_register_prehook(PyObject* self, PyObject* hook);
75
+
76
+ PyObject* THPCppFunction_name(PyObject* self, PyObject* noargs);
77
+ PyObject* THPCppFunction_sequence_nr(PyObject* self, PyObject* noargs);
78
+
79
+ PyTypeObject* _initFunctionPyTypeObject(
80
+ PyTypeObject& type,
81
+ const char* name,
82
+ PyGetSetDef* function_properties,
83
+ PyMethodDef* function_methods);
84
+
85
+ PyObject* registerFunctionHook(Node& fn, PyObject* hook);
86
+
87
+ PyObject* registerFunctionPreHook(Node& fn, PyObject* hook);
88
+
89
+ template <typename Ctor>
90
+ PyTypeObject* createForwardFunctionPyTypeObject(
91
+ PyTypeObject& type,
92
+ const char* name,
93
+ PyGetSetDef* function_properties = nullptr,
94
+ PyMethodDef* function_methods = nullptr) {
95
+ type.tp_new = &CppFunction_pynew<Ctor>;
96
+ return _initFunctionPyTypeObject(
97
+ type, name, function_properties, function_methods);
98
+ }
99
+
100
+ void registerCppFunction(const std::type_info& type, PyTypeObject* pytype);
101
+ PyObject* functionToPyObject(const std::shared_ptr<Node>& cdata);
102
+
103
+ bool THPCppFunction_Check(PyObject* obj);
104
+
105
+ } // namespace torch::autograd
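
A sketch of how binding code can expose a C++ node to Python and attach a hook using the functions declared above; `grad_fn` (a std::shared_ptr<Node>) and `py_hook` (a Python callable) are assumed to exist.

    // Wrap the C++ node in a THPCppFunction (or return the cached wrapper).
    PyObject* py_fn = torch::autograd::functionToPyObject(grad_fn);
    if (py_fn && torch::autograd::THPCppFunction_Check(py_fn)) {
      // Attach a Python pre-hook to the underlying Node; a real caller would
      // keep the returned handle to allow removal later.
      PyObject* handle = torch::autograd::registerFunctionPreHook(*grad_fn, py_hook);
      Py_XDECREF(handle);
    }
    Py_XDECREF(py_fn);
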
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h ADDED
@@ -0,0 +1,44 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <torch/csrc/autograd/engine.h>
6
+ #include <torch/csrc/autograd/function.h>
7
+
8
+ bool THPEngine_initModule(PyObject* module);
9
+
10
+ namespace torch::autograd::python {
11
+
12
+ struct PythonEngine : public Engine {
13
+ static Engine& get_python_engine();
14
+ ~PythonEngine() override;
15
+ void thread_init(
16
+ int device,
17
+ const std::shared_ptr<ReadyQueue>& ready_queue,
18
+ bool should_increment) override;
19
+ void thread_on_exception(
20
+ std::shared_ptr<GraphTask> graph_task,
21
+ const std::shared_ptr<Node>& fn,
22
+ std::exception& e) override;
23
+ variable_list execute(
24
+ const edge_list& roots,
25
+ const variable_list& inputs,
26
+ bool keep_graph,
27
+ bool create_graph,
28
+ bool accumulate_grad,
29
+ const edge_list& outputs = {}) override;
30
+
31
+ c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
32
+ const std::shared_ptr<GraphTask>& graph_task,
33
+ std::shared_ptr<Node> graph_root,
34
+ InputBuffer&& input_buffer) override;
35
+
36
+ std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() override;
37
+ std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks()
38
+ override;
39
+
40
+ private:
41
+ PythonEngine();
42
+ };
43
+
44
+ } // namespace torch::autograd::python
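
A sketch of how the Python bindings obtain the singleton engine before dispatching a backward call; `roots` (an edge_list) and `grads` (a variable_list) are assumed to have been built by the caller.

    using namespace torch::autograd;
    Engine& engine = python::PythonEngine::get_python_engine();
    // Runs backward over the graph rooted at `roots`, seeded with `grads`.
    variable_list outputs = engine.execute(
        roots, grads, /*keep_graph=*/false, /*create_graph=*/false,
        /*accumulate_grad=*/true);
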
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_fft_functions.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch::autograd {
4
+
5
+ void initFFTFunctions(PyObject* module);
6
+
7
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_function.h ADDED
@@ -0,0 +1,160 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/python_headers.h>
4
+
5
+ #include <torch/csrc/Exceptions.h>
6
+ #include <torch/csrc/autograd/custom_function.h>
7
+ #include <torch/csrc/autograd/function.h>
8
+ #include <torch/csrc/autograd/saved_variable.h>
9
+ #include <torch/csrc/autograd/variable.h>
10
+ #include <torch/csrc/utils/object_ptr.h>
11
+
12
+ #include <c10/core/DeviceGuard.h>
13
+ #include <c10/util/Optional.h>
14
+
15
+ #include <memory>
16
+ #include <optional>
17
+ #include <vector>
18
+
19
+ namespace torch::jit {
20
+ struct Graph;
21
+ }
22
+
23
+ namespace torch::autograd {
24
+
25
+ // A Function which is implemented by a Python object (i.e., a THPFunction).
26
+ // Calls to 'apply' are forwarded to the Python method implementation.
27
+ struct PyNode : public Node {
28
+ PyNode(THPObjectPtr obj) : obj(obj.release()) {}
29
+
30
+ PyObject* to_py_args(
31
+ const variable_list& inputs,
32
+ at::OptionalDeviceGuard* device_guard);
33
+ variable_list to_variable_list(
34
+ const PyObject* r,
35
+ const std::vector<bool>& is_variable_input);
36
+
37
+ variable_list apply(variable_list&& inputs) override;
38
+ variable_list compiled_apply(
39
+ variable_list&& inputs,
40
+ std::optional<PyObject*> compiler);
41
+
42
+ void release_variables() override;
43
+ std::string name() const override;
44
+ bool is_traceable() override;
45
+
46
+ void compiled_args(CompiledNodeArgs& args) override;
47
+ variable_list apply_with_saved(
48
+ const variable_list& inputs,
49
+ SwapSavedVariables& saved) override;
50
+
51
+ bool compiled_autograd_should_lift() const;
52
+
53
+ // THPFunction this Function is wrapping. Owning!
54
+ PyObject* obj;
55
+
56
+ // The AutogradCompilerCall::hooks idx corresponding to this node's backward
57
+ std::optional<int> _backward_idx;
58
+
59
+ // The AutogradCompilerCall::hooks idx corresponding to this node's
60
+ // backward_state
61
+ std::optional<int> _backward_state_idx;
62
+
63
+ // NOLINTNEXTLINE(bugprone-exception-escape)
64
+ ~PyNode() override {
65
+ // We can't use THPObjectPtr as a field in this class: its destructor would
+ // not acquire the GIL. When this was once forgotten,
+ // TestAutograd.test_inplace_view_python caught the bug.
+ // If Python is already shut down, leak the wrapped Python objects instead.
69
+ if (Py_IsInitialized()) {
70
+ pybind11::gil_scoped_acquire gil;
71
+ Py_DECREF(obj);
72
+ }
73
+ }
74
+ };
75
+
76
+ /**
77
+ * Cast an object into a tuple, if it is not a tuple already. Returns true
78
+ * if the original object was not a tuple.
79
+ */
80
+ inline bool ensure_tuple(THPObjectPtr& obj) {
81
+ if (PyTuple_Check(obj.get()))
82
+ return false;
83
+
84
+ PyObject* tuple = PyTuple_New(1);
85
+ if (!tuple)
86
+ throw python_error();
87
+ PyTuple_SET_ITEM(tuple, 0, obj.release());
88
+ obj = tuple;
89
+ return true;
90
+ }
91
+
92
+ } // namespace torch::autograd
93
+
94
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
95
+ struct THPFunction {
96
+ PyObject_HEAD
97
+
98
+ PyObject* needs_input_grad;
99
+
100
+ // Python tuple of tensors whose variables we should save. Set
101
+ // by Python with 'save_for_backward'. If nullptr, no tensors were
102
+ // saved.
103
+ PyObject* to_save;
104
+ // Python tuple of tensors which are not differentiable. Set by
105
+ // Python with 'mark_non_differentiable'. If nullptr, no tensors were
106
+ // non-differentiable.
107
+ PyObject* non_differentiable;
108
+ // Python tuple of tensors which had inplace updates in the forward()
109
+ // pass. Set by Python with 'mark_dirty'. If nullptr, no tensors were
110
+ // modified inplace.
111
+ PyObject* dirty_tensors;
112
+
113
+ // boolean indicating whether to materialize undefined output grad tensors
114
+ // into tensors full of zeros. Set by Python with 'set_materialize_grads'.
115
+ // Default is true.
116
+ bool materialize_grads;
117
+
118
+ // boolean indicating whether to materialize output grad tensors
119
+ // corresponding to non-differentiable outputs. Normally, someone would
120
+ // already get this behavior by switching off materialize_grads,
121
+ // but there are certain use cases where that is not feasible:
122
+ // https://github.com/pytorch/pytorch/pull/98659#pullrequestreview-1376822560
123
+ bool materialize_non_diff_grads;
124
+
125
+ // This is enabled by compiled autograd as a way to signal to AotAutograd it
126
+ // should call the original FX graph rather than compiling.
127
+ bool compiled_autograd_tracing;
128
+ PyObject* compiled_autograd_backward_state;
129
+ std::vector<c10::SymInt> compiled_autograd_symints;
130
+
131
+ std::vector<torch::autograd::VariableInfo> output_info;
132
+ std::vector<torch::autograd::VariableInfo> input_info;
133
+ std::vector<torch::autograd::SavedVariable> saved_variables;
134
+ // For each input, true if the input is a THPVariable
135
+ std::vector<bool> is_variable_input;
136
+ char has_freed_buffers;
137
+
138
+ PyObject* saved_for_forward;
139
+ // The actual PyNode (in the autograd graph) that this data was
140
+ // saved for. This field may be NULL (because a user can construct
141
+ // a THPFunction directly from Python), but when this field is non-NULL,
142
+ // it is guaranteed that cdata.lock()->obj == this
143
+ //
144
+ // In most ordinary use, this field should always be non-NULL; e.g.,
145
+ // when we allocate a THPFunction because we are running Node.apply,
146
+ // after constructing a THPFunction, we immediately allocate a PyNode
147
+ // for it. We can't enforce this directly in the constructor of
148
+ // THPFunction though, because there's no way to keep it live long enough
149
+ // to save an owning reference to PyNode into the grad_fn of a Variable.
150
+ std::weak_ptr<torch::autograd::PyNode> cdata;
151
+ };
152
+
153
+ bool THPFunction_initModule(PyObject* module);
154
+ extern PyTypeObject THPFunctionType;
155
+ extern PyObject* THPFunctionClass;
156
+ extern PyObject* THPGradientEdgeClass;
157
+
158
+ inline bool THPFunction_Check(PyObject* obj) {
159
+ return PyObject_IsInstance(obj, (PyObject*)&THPFunctionType);
160
+ }
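
A sketch of the pattern ensure_tuple() supports when unpacking the result of a Python forward(); `forward_fn` and `args` are placeholders for a callable and its argument tuple.

    THPObjectPtr ret(PyObject_CallObject(forward_fn, args));
    if (!ret)
      throw python_error();
    // Normalize a bare return value into a 1-tuple so the unpacking loop can
    // treat single- and multi-output cases uniformly.
    const bool was_single = torch::autograd::ensure_tuple(ret);
    const Py_ssize_t num_outputs = PyTuple_GET_SIZE(ret.get());
    for (Py_ssize_t i = 0; i < num_outputs; ++i) {
      PyObject* output = PyTuple_GET_ITEM(ret.get(), i);  // borrowed reference
      // ... wrap each output as a Variable ...
      (void)output;
    }
    (void)was_single;
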
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_hook.h ADDED
@@ -0,0 +1,55 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/function_hook.h>
4
+ #include <torch/csrc/python_headers.h>
5
+ #include <torch/csrc/utils/object_ptr.h>
6
+
7
+ namespace torch::dynamo::autograd {
8
+ class SwapSavedVariables;
9
+ } // namespace torch::dynamo::autograd
10
+
11
+ namespace torch::autograd {
12
+
13
+ struct PyFunctionTensorPreHook : public FunctionPreHook {
14
+ PyFunctionTensorPreHook(PyObject* dict, size_t value_idx);
15
+ ~PyFunctionTensorPreHook() override;
16
+ variable_list operator()(const variable_list& values) override;
17
+ void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override;
18
+ PyObject* dict;
19
+ size_t value_idx;
20
+ };
21
+
22
+ struct PyFunctionPreHook : public FunctionPreHook {
23
+ PyFunctionPreHook(PyObject* dict);
24
+ ~PyFunctionPreHook() override;
25
+ variable_list operator()(const variable_list& values) override;
26
+ void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override;
27
+ PyObject* dict;
28
+ };
29
+
30
+ struct PyFunctionPostHook : public FunctionPostHook {
31
+ PyFunctionPostHook(PyObject* dict);
32
+ ~PyFunctionPostHook() override;
33
+ variable_list operator()(
34
+ const variable_list& outputs,
35
+ const variable_list& inputs) override;
36
+ void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override;
37
+ PyObject* dict;
38
+ };
39
+
40
+ // PyFunctionTensorPostAccGradHooks wraps a Python dictionary of
+ // PostAccumulateGradHooks, so it may seem odd that it is itself a subclass of
+ // PostAccumulateGradHook. We simply follow the precedent of PyFunctionPreHook
+ // and PyFunctionPostHook above so it can plug into the existing infrastructure.
44
+ struct PyFunctionTensorPostAccGradHooks : public PostAccumulateGradHook {
45
+ PyFunctionTensorPostAccGradHooks(PyObject* dict);
46
+ ~PyFunctionTensorPostAccGradHooks() override;
47
+ void operator()(const Variable& tensor) override;
48
+ void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) override;
49
+ void apply_with_saved(
50
+ Variable& tensor,
51
+ torch::dynamo::autograd::SwapSavedVariables& saved) override;
52
+ PyObject* dict;
53
+ };
54
+
55
+ } // namespace torch::autograd
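
A sketch of how these wrappers are typically attached to a node; `node` is a torch::autograd::Node, `hooks_dict` is an owned PyObject* dict of Python callables, and add_pre_hook is assumed to be the Node API that appends to the pre_hooks_ vector declared in function.h.

    // Wrap the Python dict of callables and hand ownership of the wrapper to the node.
    node.add_pre_hook(
        std::make_unique<torch::autograd::PyFunctionPreHook>(hooks_dict));
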
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_linalg_functions.h ADDED
@@ -0,0 +1,7 @@
1
+ #pragma once
2
+
3
+ namespace torch::autograd {
4
+
5
+ void initLinalgFunctions(PyObject* module);
6
+
7
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_saved_variable_hooks.h ADDED
@@ -0,0 +1,33 @@
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <pybind11/pybind11.h>
5
+ #include <torch/csrc/Export.h>
6
+ #include <torch/csrc/autograd/python_variable.h>
7
+ #include <torch/csrc/autograd/saved_variable_hooks.h>
8
+ #include <torch/csrc/python_headers.h>
9
+ #include <torch/csrc/utils/pybind.h>
10
+
11
+ namespace py = pybind11;
12
+
13
+ namespace torch::autograd {
14
+
15
+ struct PySavedVariableHooks : public SavedVariableHooks {
16
+ PySavedVariableHooks(py::function& pack_hook, py::function& unpack_hook);
17
+ void call_pack_hook(const at::Tensor& tensor) override;
18
+ at::Tensor call_unpack_hook() override;
19
+ ~PySavedVariableHooks() override;
20
+
21
+ private:
22
+ PyObject* pack_hook_;
23
+ PyObject* unpack_hook_;
24
+ PyObject* data_ = nullptr;
25
+ };
26
+
27
+ struct PyDefaultSavedVariableHooks {
28
+ static void push_hooks(py::function& pack_hook, py::function& unpack_hook);
29
+ static void pop_hooks();
30
+ static std::unique_ptr<SavedVariableHooks> get_hooks();
31
+ };
32
+
33
+ } // namespace torch::autograd
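
A sketch of how the default hook stack above might be driven from binding code; `pack_hook` and `unpack_hook` are assumed to be py::function objects received from Python (e.g. via torch.autograd.graph.saved_tensors_hooks).

    // Install process-wide default pack/unpack hooks, run the forward pass,
    // then restore the previous hooks.
    torch::autograd::PyDefaultSavedVariableHooks::push_hooks(pack_hook, unpack_hook);
    // ... forward pass: SavedVariable consults get_hooks() when saving tensors ...
    torch::autograd::PyDefaultSavedVariableHooks::pop_hooks();
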