applied-ai-018 committed on
Commit 1bf3e6e · verified · 1 Parent(s): eb1c9ba

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step20/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  6. ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  7. ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  8. venv/lib/python3.10/site-packages/torch/cuda/__init__.py +1412 -0
  9. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/cuda/amp/__init__.py +11 -0
  25. venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py +144 -0
  30. venv/lib/python3.10/site-packages/torch/cuda/amp/common.py +9 -0
  31. venv/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py +28 -0
  32. venv/lib/python3.10/site-packages/torch/cuda/jiterator.py +185 -0
  33. venv/lib/python3.10/site-packages/torch/cuda/memory.py +914 -0
  34. venv/lib/python3.10/site-packages/torch/cuda/profiler.py +61 -0
  35. venv/lib/python3.10/site-packages/torch/include/clog.h +108 -0
  36. venv/lib/python3.10/site-packages/torch/include/cpuinfo.h +1956 -0
  37. venv/lib/python3.10/site-packages/torch/include/dnnl.h +22 -0
  38. venv/lib/python3.10/site-packages/torch/include/dnnl_config.h +22 -0
  39. venv/lib/python3.10/site-packages/torch/include/dnnl_debug.h +22 -0
  40. venv/lib/python3.10/site-packages/torch/include/dnnl_ocl.h +22 -0
  41. venv/lib/python3.10/site-packages/torch/include/dnnl_sycl.h +22 -0
  42. venv/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h +22 -0
  43. venv/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h +22 -0
  44. venv/lib/python3.10/site-packages/torch/include/dnnl_types.h +22 -0
  45. venv/lib/python3.10/site-packages/torch/include/dnnl_version.h +22 -0
  46. venv/lib/python3.10/site-packages/torch/include/experiments-config.h +25 -0
  47. venv/lib/python3.10/site-packages/torch/include/fp16.h +11 -0
  48. venv/lib/python3.10/site-packages/torch/include/fxdiv.h +425 -0
  49. venv/lib/python3.10/site-packages/torch/include/libshm.h +46 -0
  50. venv/lib/python3.10/site-packages/torch/include/nnpack.h +659 -0
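The ckpts/... entries above are Git LFS pointer stubs rather than tensor data; the per-parameter names exp_avg, exp_avg_sq, and fp32 suggest Adam optimizer moments plus an fp32 master copy of each weight. As a rough sketch (assuming the real .pt blobs have been fetched, e.g. with git lfs pull, and were written with torch.save), one shard could be inspected like this:

import torch

# Hypothetical inspection of one optimizer-state shard from the list above.
# The path is real; the loading semantics (a plain torch.save'd tensor) are an assumption.
shard = "ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg_sq.pt"
state = torch.load(shard, map_location="cpu")
print(type(state), getattr(state, "shape", None), getattr(state, "dtype", None))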
ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:356151db3e8faf82c073da70a6e056425fcc45a2b0970b707a513579f35f3333
+ size 50332843
ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7d7627bc9a3c917891aa8408fbeeae1dbc23b07b3b9a003d958165eb8bd1246
+ size 50332749
ckpts/universal/global_step20/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:523460a090ec65446cc6a8d4693ef0107650c6fd11c6d977e02f123e9863ce23
+ size 33555627
ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e54fbc32a8b1254807e878ba1cd35e94b720b342cbc46a4f1c16bdfd873bd7ce
+ size 33555612
ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e82149015d85bc08b00227a304e699480097baacbf1704add001ecfa7245340
+ size 33555612
ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b81a2876bfdd26b7928ac452190c8e85c73b8a8aa31da6508ef0ff87f6f6469
+ size 33555627
ckpts/universal/global_step20/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67d90a6232ca06174ec19b38aa8d4bffa27dfadeb7d3c357fc014fd442ceafd6
+ size 33555533
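Each hunk above is a three-line Git LFS pointer, not the checkpoint itself: a spec version, the blob's sha256, and its size in bytes. A minimal parsing sketch (the helper name is illustrative):

# Parse the three-line LFS pointer format shown in the hunks above.
def parse_lfs_pointer(text):
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:356151db3e8faf82c073da70a6e056425fcc45a2b0970b707a513579f35f3333\n"
    "size 50332843\n"
)
print(parse_lfs_pointer(pointer))  # {'version': ..., 'sha256': '3561...', 'size_bytes': 50332843}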
venv/lib/python3.10/site-packages/torch/cuda/__init__.py ADDED
@@ -0,0 +1,1412 @@
1
+ r"""
2
+ This package adds support for CUDA tensor types.
3
+
4
+ It implements the same functions as CPU tensors, but they utilize
5
+ GPUs for computation.
6
+
7
+ It is lazily initialized, so you can always import it, and use
8
+ :func:`is_available()` to determine if your system supports CUDA.
9
+
10
+ :ref:`cuda-semantics` has more details about working with CUDA.
11
+ """
12
+
13
+
14
+ import contextlib
15
+ import importlib
16
+ import os
17
+ import sys
18
+ import threading
19
+ import traceback
20
+ import warnings
21
+ from functools import lru_cache
22
+ from typing import Any, Callable, cast, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch._C
26
+ from torch.types import Device
27
+ from .. import device as _device
28
+ from .._utils import _dummy_type, _LazySeedTracker, classproperty
29
+ from ._utils import _get_device_index
30
+ from .graphs import (
31
+ CUDAGraph,
32
+ graph,
33
+ graph_pool_handle,
34
+ is_current_stream_capturing,
35
+ make_graphed_callables,
36
+ )
37
+ from .streams import Event, ExternalStream, Stream
38
+
39
+ try:
40
+ from torch._C import _cudart # type: ignore[attr-defined]
41
+ except ImportError:
42
+ _cudart = None
43
+
44
+ _initialized = False
45
+ _tls = threading.local()
46
+ _initialization_lock = threading.Lock()
47
+ _queued_calls: List[
48
+ Tuple[Callable[[], None], List[str]]
49
+ ] = [] # don't invoke these until initialization occurs
50
+ _is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False)
51
+ _device_t = Union[_device, str, int, None]
52
+
53
+ _HAS_PYNVML = False
54
+ _PYNVML_ERR = None
55
+ try:
56
+ import pynvml # type: ignore[import]
57
+
58
+ _HAS_PYNVML = True
59
+ except ImportError as err:
60
+ _PYNVML_ERR = err # sometimes a lib is installed but the import fails for some other reason, so we log the error for later
61
+
62
+ _lazy_seed_tracker = _LazySeedTracker()
63
+
64
+ # Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA
65
+ if hasattr(torch._C, "_CudaDeviceProperties"):
66
+ _CudaDeviceProperties = torch._C._CudaDeviceProperties
67
+ else:
68
+ _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties") # type: ignore[assignment, misc]
69
+
70
+ if hasattr(torch._C, "_cuda_exchangeDevice"):
71
+ _exchange_device = torch._C._cuda_exchangeDevice
72
+ else:
73
+
74
+ def _exchange_device(device: int) -> int:
75
+ if device < 0:
76
+ return -1
77
+ raise RuntimeError("PyTorch was compiled without CUDA support")
78
+
79
+
80
+ if hasattr(torch._C, "_cuda_maybeExchangeDevice"):
81
+ _maybe_exchange_device = torch._C._cuda_maybeExchangeDevice
82
+ else:
83
+
84
+ def _maybe_exchange_device(device: int) -> int:
85
+ if device < 0:
86
+ return -1
87
+ raise RuntimeError("PyTorch was compiled without CUDA support")
88
+
89
+
90
+ has_half: bool = True
91
+ has_magma: bool = torch._C._has_magma
92
+
93
+ default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment]
94
+
95
+
96
+ def _is_compiled() -> bool:
97
+ r"""Return true if compiled with CUDA support."""
98
+ return hasattr(torch._C, "_cuda_getDeviceCount")
99
+
100
+
101
+ def _nvml_based_avail() -> bool:
102
+ return os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1"
103
+
104
+
105
+ def is_available() -> bool:
106
+ r"""Return a bool indicating if CUDA is currently available."""
107
+ if not _is_compiled():
108
+ return False
109
+ if _nvml_based_avail():
110
+ # The user has set an env variable to request this availability check that attempts to avoid fork poisoning by
111
+ # using NVML at the cost of a weaker CUDA availability assessment. Note that if NVML discovery/initialization
112
+ # fails, this assessment falls back to the default CUDA Runtime API assessment (`cudaGetDeviceCount`)
113
+ return device_count() > 0
114
+ else:
115
+ # The default availability inspection never throws and returns 0 if the driver is missing or can't
116
+ # be initialized. This uses the CUDA Runtime API `cudaGetDeviceCount` which in turn initializes the CUDA Driver
117
+ # API via `cuInit`
118
+ return torch._C._cuda_getDeviceCount() > 0
119
+
120
+
121
+ def is_bf16_supported():
122
+ r"""Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16."""
123
+ # Check for ROCm, if true return true, no ROCM_VERSION check required,
124
+ # since it is supported on AMD GPU archs.
125
+ if torch.version.hip:
126
+ return True
127
+
128
+ device = torch.cuda.current_device()
129
+
130
+ # Check for CUDA version and device compute capability.
131
+ # This is a fast way to check for it.
132
+ cuda_version = torch.version.cuda
133
+ if (
134
+ cuda_version is not None
135
+ and int(cuda_version.split(".")[0]) >= 11
136
+ and torch.cuda.get_device_properties(device).major >= 8
137
+ ):
138
+ return True
139
+
140
+ # Finally try to create a bfloat16 device.
141
+ return _check_bf16_tensor_supported(device)
142
+
143
+
144
+ @lru_cache(maxsize=16)
145
+ def _check_bf16_tensor_supported(device: _device_t):
146
+ try:
147
+ torch.tensor([1.0], dtype=torch.bfloat16, device=device)
148
+ return True
149
+ except Exception:
150
+ return False
151
+
152
+
153
+ def _sleep(cycles):
154
+ torch._C._cuda_sleep(cycles)
155
+
156
+
157
+ def _check_capability():
158
+ incorrect_binary_warn = """
159
+ Found GPU%d %s which requires CUDA_VERSION >= %d to
160
+ work properly, but your PyTorch was compiled
161
+ with CUDA_VERSION %d. Please install the correct PyTorch binary
162
+ using instructions from https://pytorch.org
163
+ """
164
+
165
+ old_gpu_warn = """
166
+ Found GPU%d %s which is of cuda capability %d.%d.
167
+ PyTorch no longer supports this GPU because it is too old.
168
+ The minimum cuda capability supported by this library is %d.%d.
169
+ """
170
+
171
+ if torch.version.cuda is not None: # on ROCm we don't want this check
172
+ CUDA_VERSION = torch._C._cuda_getCompiledVersion()
173
+ for d in range(device_count()):
174
+ capability = get_device_capability(d)
175
+ major = capability[0]
176
+ minor = capability[1]
177
+ name = get_device_name(d)
178
+ current_arch = major * 10 + minor
179
+ min_arch = min(
180
+ (int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()),
181
+ default=35,
182
+ )
183
+ if current_arch < min_arch:
184
+ warnings.warn(
185
+ old_gpu_warn
186
+ % (d, name, major, minor, min_arch // 10, min_arch % 10)
187
+ )
188
+
189
+
190
+ def _check_cubins():
191
+ incompatible_device_warn = """
192
+ {} with CUDA capability sm_{} is not compatible with the current PyTorch installation.
193
+ The current PyTorch install supports CUDA capabilities {}.
194
+ If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/
195
+ """
196
+ if torch.version.cuda is None: # on ROCm we don't want this check
197
+ return
198
+ arch_list = get_arch_list()
199
+ if len(arch_list) == 0:
200
+ return
201
+ supported_sm = [int(arch.split("_")[1]) for arch in arch_list if "sm_" in arch]
202
+ for idx in range(device_count()):
203
+ cap_major, cap_minor = get_device_capability(idx)
204
+ # NVIDIA GPU compute architectures are backward compatible within major version
205
+ supported = any(sm // 10 == cap_major for sm in supported_sm)
206
+ if not supported:
207
+ device_name = get_device_name(idx)
208
+ capability = cap_major * 10 + cap_minor
209
+ warnings.warn(
210
+ incompatible_device_warn.format(
211
+ device_name, capability, " ".join(arch_list), device_name
212
+ )
213
+ )
214
+
215
+
216
+ def is_initialized():
217
+ r"""Return whether PyTorch's CUDA state has been initialized."""
218
+ return _initialized and not _is_in_bad_fork()
219
+
220
+
221
+ def _lazy_call(callable, **kwargs):
222
+ if is_initialized():
223
+ callable()
224
+ else:
225
+ # TODO(torch_deploy): this accesses linecache, which attempts to read the
226
+ # file system to get traceback info. Patch linecache or do something
227
+ # else here if this ends up being important.
228
+ global _lazy_seed_tracker
229
+ if kwargs.get("seed_all", False):
230
+ _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
231
+ elif kwargs.get("seed", False):
232
+ _lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
233
+ else:
234
+ # Don't store the actual traceback to avoid memory cycle
235
+ _queued_calls.append((callable, traceback.format_stack()))
236
+
237
+
238
+ _lazy_call(_check_capability)
239
+ _lazy_call(_check_cubins)
240
+
241
+
242
+ class DeferredCudaCallError(Exception):
243
+ pass
244
+
245
+
246
+ OutOfMemoryError = torch._C._OutOfMemoryError
247
+
248
+
249
+ def init():
250
+ r"""Initialize PyTorch's CUDA state.
251
+
252
+ You may need to call this explicitly if you are interacting with
253
+ PyTorch via its C API, as Python bindings for CUDA functionality
254
+ will not be available until this initialization takes place.
255
+ Ordinary users should not need this, as all of PyTorch's CUDA methods
256
+ automatically initialize CUDA state on-demand.
257
+
258
+ Does nothing if the CUDA state is already initialized.
259
+ """
260
+ _lazy_init()
261
+
262
+
263
+ def _lazy_init():
264
+ global _initialized, _queued_calls
265
+ if is_initialized() or hasattr(_tls, "is_initializing"):
266
+ return
267
+ with _initialization_lock:
268
+ # We be double-checked locking, boys! This is OK because
269
+ # the above test was GIL protected anyway. The inner test
270
+ # is for when a thread blocked on some other thread which was
271
+ # doing the initialization; when they get the lock, they will
272
+ # find there is nothing left to do.
273
+ if is_initialized():
274
+ return
275
+ # It is important to prevent other threads from entering _lazy_init
276
+ # immediately, while we are still guaranteed to have the GIL, because some
277
+ # of the C calls we make below will release the GIL
278
+ if _is_in_bad_fork():
279
+ raise RuntimeError(
280
+ "Cannot re-initialize CUDA in forked subprocess. To use CUDA with "
281
+ "multiprocessing, you must use the 'spawn' start method"
282
+ )
283
+ if not hasattr(torch._C, "_cuda_getDeviceCount"):
284
+ raise AssertionError("Torch not compiled with CUDA enabled")
285
+ if _cudart is None:
286
+ raise AssertionError(
287
+ "libcudart functions unavailable. It looks like you have a broken build?"
288
+ )
289
+ # This function throws if there's a driver initialization error, no GPUs
290
+ # are found or any other error occurs
291
+ if "CUDA_MODULE_LOADING" not in os.environ:
292
+ os.environ["CUDA_MODULE_LOADING"] = "LAZY"
293
+ torch._C._cuda_init()
294
+ # Some of the queued calls may reentrantly call _lazy_init();
295
+ # we need to just return without initializing in that case.
296
+ # However, we must not let any *other* threads in!
297
+ _tls.is_initializing = True
298
+
299
+ for calls in _lazy_seed_tracker.get_calls():
300
+ if calls:
301
+ _queued_calls.append(calls)
302
+
303
+ try:
304
+ for queued_call, orig_traceback in _queued_calls:
305
+ try:
306
+ queued_call()
307
+ except Exception as e:
308
+ msg = (
309
+ f"CUDA call failed lazily at initialization with error: {str(e)}\n\n"
310
+ f"CUDA call was originally invoked at:\n\n{''.join(orig_traceback)}"
311
+ )
312
+ raise DeferredCudaCallError(msg) from e
313
+ finally:
314
+ delattr(_tls, "is_initializing")
315
+ _initialized = True
316
+
317
+
318
+ def cudart():
319
+ _lazy_init()
320
+ return _cudart
321
+
322
+
323
+ class cudaStatus:
324
+ SUCCESS: int = 0
325
+ ERROR_NOT_READY: int = 34
326
+
327
+
328
+ class CudaError(RuntimeError):
329
+ def __init__(self, code: int) -> None:
330
+ msg = _cudart.cudaGetErrorString(_cudart.cudaError(code))
331
+ super().__init__(f"{msg} ({code})")
332
+
333
+
334
+ def check_error(res: int) -> None:
335
+ if res != _cudart.cudaError.success:
336
+ raise CudaError(res)
337
+
338
+
339
+ class _DeviceGuard:
340
+ def __init__(self, index: int):
341
+ self.idx = index
342
+ self.prev_idx = -1
343
+
344
+ def __enter__(self):
345
+ self.prev_idx = torch.cuda._exchange_device(self.idx)
346
+
347
+ def __exit__(self, type: Any, value: Any, traceback: Any):
348
+ self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)
349
+ return False
350
+
351
+
352
+ class device:
353
+ r"""Context-manager that changes the selected device.
354
+
355
+ Args:
356
+ device (torch.device or int): device index to select. It's a no-op if
357
+ this argument is a negative integer or ``None``.
358
+ """
359
+
360
+ def __init__(self, device: Any):
361
+ self.idx = _get_device_index(device, optional=True)
362
+ self.prev_idx = -1
363
+
364
+ def __enter__(self):
365
+ self.prev_idx = torch.cuda._exchange_device(self.idx)
366
+
367
+ def __exit__(self, type: Any, value: Any, traceback: Any):
368
+ self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)
369
+ return False
370
+
371
+
372
+ class device_of(device):
373
+ r"""Context-manager that changes the current device to that of given object.
374
+
375
+ You can use both tensors and storages as arguments. If a given object is
376
+ not allocated on a GPU, this is a no-op.
377
+
378
+ Args:
379
+ obj (Tensor or Storage): object allocated on the selected device.
380
+ """
381
+
382
+ def __init__(self, obj):
383
+ idx = obj.get_device() if obj.is_cuda else -1
384
+ super().__init__(idx)
385
+
386
+
387
+ def set_device(device: _device_t) -> None:
388
+ r"""Set the current device.
389
+
390
+ Usage of this function is discouraged in favor of :any:`device`. In most
391
+ cases it's better to use the ``CUDA_VISIBLE_DEVICES`` environment variable.
392
+
393
+ Args:
394
+ device (torch.device or int): selected device. This function is a no-op
395
+ if this argument is negative.
396
+ """
397
+ device = _get_device_index(device)
398
+ if device >= 0:
399
+ torch._C._cuda_setDevice(device)
400
+
401
+
402
+ def get_device_name(device: Optional[_device_t] = None) -> str:
403
+ r"""Get the name of a device.
404
+
405
+ Args:
406
+ device (torch.device or int, optional): device for which to return the
407
+ name. This function is a no-op if this argument is a negative
408
+ integer. It uses the current device, given by :func:`~torch.cuda.current_device`,
409
+ if :attr:`device` is ``None`` (default).
410
+
411
+ Returns:
412
+ str: the name of the device
413
+ """
414
+ return get_device_properties(device).name
415
+
416
+
417
+ def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]:
418
+ r"""Get the cuda capability of a device.
419
+
420
+ Args:
421
+ device (torch.device or int, optional): device for which to return the
422
+ device capability. This function is a no-op if this argument is
423
+ a negative integer. It uses the current device, given by
424
+ :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
425
+ (default).
426
+
427
+ Returns:
428
+ tuple(int, int): the major and minor cuda capability of the device
429
+ """
430
+ prop = get_device_properties(device)
431
+ return prop.major, prop.minor
432
+
433
+
434
+ def get_device_properties(device: _device_t) -> _CudaDeviceProperties:
435
+ r"""Get the properties of a device.
436
+
437
+ Args:
438
+ device (torch.device or int or str): device for which to return the
439
+ properties of the device.
440
+
441
+ Returns:
442
+ _CudaDeviceProperties: the properties of the device
443
+ """
444
+ _lazy_init() # will define _get_device_properties
445
+ device = _get_device_index(device, optional=True)
446
+ if device < 0 or device >= device_count():
447
+ raise AssertionError("Invalid device id")
448
+ return _get_device_properties(device) # type: ignore[name-defined]
449
+
450
+
451
+ def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool:
452
+ r"""Check if peer access between two devices is possible."""
453
+ _lazy_init()
454
+ device = _get_device_index(device, optional=True)
455
+ peer_device = _get_device_index(peer_device)
456
+ if device < 0 or device >= device_count():
457
+ raise AssertionError("Invalid device id")
458
+ if peer_device < 0 or peer_device >= device_count():
459
+ raise AssertionError("Invalid peer device id")
460
+ return torch._C._cuda_canDeviceAccessPeer(device, peer_device)
461
+
462
+
463
+ class StreamContext:
464
+ r"""Context-manager that selects a given stream.
465
+
466
+ All CUDA kernels queued within its context will be enqueued on a selected
467
+ stream.
468
+
469
+ Args:
470
+ Stream (Stream): selected stream. This manager is a no-op if it's
471
+ ``None``.
472
+ .. note:: Streams are per-device.
473
+ """
474
+ cur_stream: Optional["torch.cuda.Stream"]
475
+
476
+ def __init__(self, stream: Optional["torch.cuda.Stream"]):
477
+ self.stream = stream
478
+ self.idx = _get_device_index(None, True)
479
+ if not torch.jit.is_scripting():
480
+ if self.idx is None:
481
+ self.idx = -1
482
+
483
+ self.src_prev_stream = (
484
+ None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
485
+ )
486
+ self.dst_prev_stream = (
487
+ None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
488
+ )
489
+
490
+ def __enter__(self):
491
+ # Local cur_stream variable for type refinement
492
+ cur_stream = self.stream
493
+ # Return if stream is None or CUDA device not available
494
+ if cur_stream is None or self.idx == -1:
495
+ return
496
+ self.src_prev_stream = torch.cuda.current_stream(None)
497
+
498
+ # If the stream is not on the current device, then
499
+ # set the current stream on the device
500
+ if self.src_prev_stream.device != cur_stream.device:
501
+ with device(cur_stream.device):
502
+ self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device)
503
+ torch.cuda.set_stream(cur_stream)
504
+
505
+ def __exit__(self, type: Any, value: Any, traceback: Any):
506
+ # Local cur_stream variable for type refinement
507
+ cur_stream = self.stream
508
+ # If stream is None or no CUDA device available, return
509
+ if cur_stream is None or self.idx == -1:
510
+ return
511
+
512
+ # Reset the stream on the original device
513
+ # and destination device
514
+ if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
515
+ torch.cuda.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
516
+ torch.cuda.set_stream(self.src_prev_stream) # type: ignore[arg-type]
517
+
518
+
519
+ def stream(stream: Optional["torch.cuda.Stream"]) -> StreamContext:
520
+ r"""Wrapper around the StreamContext context manager that selects a given stream.
521
+
522
+ Arguments:
523
+ stream (Stream): selected stream. This manager is a no-op if it's
524
+ ``None``.
525
+ .. note:: In eager mode stream is of type Stream class while in JIT it is
526
+ an object of the custom class ``torch.classes.cuda.Stream``.
527
+ """
528
+ return StreamContext(stream)
529
+
530
+
531
+ def _set_stream_by_id(stream_id, device_index, device_type):
532
+ r"""Set the stream specified by the stream id, device index and
533
+ device type
534
+
535
+ Args: stream_id (int): stream id in stream pool
536
+ device_index (int): device index in topo
537
+ device_type (int): enum device type
538
+ """
539
+ torch._C._cuda_setStream(
540
+ stream_id=stream_id,
541
+ device_index=device_index,
542
+ device_type=device_type,
543
+ )
544
+
545
+
546
+ def set_stream(stream: Stream):
547
+ r"""Set the current stream. This is a wrapper API to set the stream.
548
+ Usage of this function is discouraged in favor of the ``stream``
549
+ context manager.
550
+
551
+ Args:
552
+ stream (Stream): selected stream. This function is a no-op
553
+ if this argument is ``None``.
554
+ """
555
+ if stream is None:
556
+ return
557
+ _set_stream_by_id(
558
+ stream_id=stream.stream_id,
559
+ device_index=stream.device_index,
560
+ device_type=stream.device_type,
561
+ )
562
+
563
+
564
+ def _parse_visible_devices() -> Union[List[int], List[str]]:
565
+ r"""Parse CUDA_VISIBLE_DEVICES environment variable."""
566
+ var = os.getenv("CUDA_VISIBLE_DEVICES")
567
+ if var is None:
568
+ return list(range(64))
569
+
570
+ def _strtoul(s: str) -> int:
571
+ """Return -1 or the positive integer the string starts with."""
572
+ if not s:
573
+ return -1
574
+ for idx, c in enumerate(s):
575
+ if not (c.isdigit() or (idx == 0 and c in "+-")):
576
+ break
577
+ if idx + 1 == len(s):
578
+ idx += 1
579
+ return int(s[:idx]) if idx > 0 else -1
580
+
581
+ def parse_list_with_prefix(lst: str, prefix: str) -> List[str]:
582
+ rcs: List[str] = []
583
+ for elem in lst.split(","):
584
+ # Repeated id results in empty set
585
+ if elem in rcs:
586
+ return cast(List[str], [])
587
+ # Anything that does not start with the prefix stops parsing
588
+ if not elem.startswith(prefix):
589
+ break
590
+ rcs.append(elem)
591
+ return rcs
592
+
593
+ if var.startswith("GPU-"):
594
+ return parse_list_with_prefix(var, "GPU-")
595
+ if var.startswith("MIG-"):
596
+ return parse_list_with_prefix(var, "MIG-")
597
+ # CUDA_VISIBLE_DEVICES uses something like strtoul
598
+ # which makes `1gpu2,2ampere` equivalent to `1,2`
599
+ rc: List[int] = []
600
+ for elem in var.split(","):
601
+ x = _strtoul(elem.strip())
602
+ # Repeated ordinal results in empty set
603
+ if x in rc:
604
+ return cast(List[int], [])
605
+ # Negative value aborts the sequence
606
+ if x < 0:
607
+ break
608
+ rc.append(x)
609
+ return rc
610
+
611
+
612
+ def _raw_device_count_nvml() -> int:
613
+ r"""Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed."""
614
+ from ctypes import byref, c_int, CDLL
615
+
616
+ nvml_h = CDLL("libnvidia-ml.so.1")
617
+ rc = nvml_h.nvmlInit()
618
+ if rc != 0:
619
+ warnings.warn("Can't initialize NVML")
620
+ return -1
621
+ dev_count = c_int(-1)
622
+ rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
623
+ if rc != 0:
624
+ warnings.warn("Can't get nvml device count")
625
+ return -1
626
+ del nvml_h
627
+ return dev_count.value
628
+
629
+
630
+ def _raw_device_uuid_nvml() -> Optional[List[str]]:
631
+ r"""Return list of device UUIDs as reported by NVML or None if NVML discovery/initialization failed."""
632
+ from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer
633
+
634
+ nvml_h = CDLL("libnvidia-ml.so.1")
635
+ rc = nvml_h.nvmlInit()
636
+ if rc != 0:
637
+ warnings.warn("Can't initialize NVML")
638
+ return None
639
+ dev_count = c_int(-1)
640
+ rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
641
+ if rc != 0:
642
+ warnings.warn("Can't get nvml device count")
643
+ return None
644
+ uuids: List[str] = []
645
+ for idx in range(dev_count.value):
646
+ dev_id = c_void_p()
647
+ rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id))
648
+ if rc != 0:
649
+ warnings.warn("Can't get device handle")
650
+ return None
651
+ buf_len = 96
652
+ buf = create_string_buffer(buf_len)
653
+ rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len)
654
+ if rc != 0:
655
+ warnings.warn("Can't get device UUID")
656
+ return None
657
+ uuids.append(buf.raw.decode("ascii").strip("\0"))
658
+ del nvml_h
659
+ return uuids
660
+
661
+
662
+ def _transform_uuid_to_ordinals(candidates: List[str], uuids: List[str]) -> List[int]:
663
+ r"""Given a list of partial UUIDs and a list of known UUIDs, build a list of ordinals, excluding ambiguous partial IDs."""
664
+
665
+ def uuid_to_orinal(candidate: str, uuids: List[str]) -> int:
666
+ best_match = -1
667
+ for idx, uuid in enumerate(uuids):
668
+ if not uuid.startswith(candidate):
669
+ continue
670
+ # Ambiguous candidate
671
+ if best_match != -1:
672
+ return -1
673
+ best_match = idx
674
+ return best_match
675
+
676
+ rc: List[int] = []
677
+ for candidate in candidates:
678
+ idx = uuid_to_orinal(candidate, uuids)
679
+ # First invalid ordinal stops parsing
680
+ if idx < 0:
681
+ break
682
+ # Duplicates result in empty set
683
+ if idx in rc:
684
+ return cast(List[int], [])
685
+ rc.append(idx)
686
+ return rc
687
+
688
+
689
+ def _device_count_nvml() -> int:
690
+ r"""Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account.
691
+
692
+ Negative value is returned if NVML discovery or initialization has failed.
693
+ """
694
+ visible_devices = _parse_visible_devices()
695
+ if not visible_devices:
696
+ return 0
697
+ try:
698
+ if type(visible_devices[0]) is str:
699
+ # Skip MIG parsing
700
+ if visible_devices[0].startswith("MIG-"):
701
+ return -1
702
+ uuids = _raw_device_uuid_nvml()
703
+ if uuids is None:
704
+ return -1
705
+ visible_devices = _transform_uuid_to_ordinals(
706
+ cast(List[str], visible_devices), uuids
707
+ )
708
+ else:
709
+ raw_cnt = _raw_device_count_nvml()
710
+ if raw_cnt <= 0:
711
+ return raw_cnt
712
+ # Trim the list up to a maximum available device
713
+ for idx, val in enumerate(visible_devices):
714
+ if cast(int, val) >= raw_cnt:
715
+ return idx
716
+ except OSError:
717
+ return -1
718
+ except AttributeError:
719
+ return -1
720
+ return len(visible_devices)
721
+
722
+
723
+ def _get_nvml_device_index(device: Optional[Union[int, Device]]) -> int:
724
+ r"""Return the NVML index of the device, taking CUDA_VISIBLE_DEVICES into account."""
725
+ idx = _get_device_index(device, optional=True)
726
+ visible_devices = _parse_visible_devices()
727
+ if type(visible_devices[0]) is str:
728
+ uuids = _raw_device_uuid_nvml()
729
+ if uuids is None:
730
+ raise RuntimeError("Can't get device UUIDs")
731
+ visible_devices = _transform_uuid_to_ordinals(
732
+ cast(List[str], visible_devices), uuids
733
+ )
734
+ visible_devices = cast(List[int], visible_devices)
735
+ if idx < 0 or idx >= len(visible_devices):
736
+ raise RuntimeError(
737
+ f"device {idx} is not visible (CUDA_VISIBLE_DEVICES={visible_devices})"
738
+ )
739
+ return visible_devices[idx]
740
+
741
+
742
+ @lru_cache(maxsize=1)
743
+ def device_count() -> int:
744
+ r"""Return the number of GPUs available."""
745
+ if not _is_compiled():
746
+ return 0
747
+ # bypass _device_count_nvml() if rocm (not supported)
748
+ nvml_count = -1 if torch.version.hip else _device_count_nvml()
749
+ return torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count
750
+
751
+
752
+ def get_arch_list() -> List[str]:
753
+ r"""Return the list of CUDA architectures this library was compiled for."""
754
+ if not is_available():
755
+ return []
756
+ arch_flags = torch._C._cuda_getArchFlags()
757
+ if arch_flags is None:
758
+ return []
759
+ return arch_flags.split()
760
+
761
+
762
+ def get_gencode_flags() -> str:
763
+ r"""Return NVCC gencode flags this library was compiled with."""
764
+ arch_list = get_arch_list()
765
+ if len(arch_list) == 0:
766
+ return ""
767
+ arch_list_ = [arch.split("_") for arch in arch_list]
768
+ return " ".join(
769
+ [
770
+ f"-gencode compute=compute_{arch},code={kind}_{arch}"
771
+ for (kind, arch) in arch_list_
772
+ ]
773
+ )
774
+
775
+
776
+ def current_device() -> int:
777
+ r"""Return the index of a currently selected device."""
778
+ _lazy_init()
779
+ return torch._C._cuda_getDevice()
780
+
781
+
782
+ def synchronize(device: _device_t = None) -> None:
783
+ r"""Wait for all kernels in all streams on a CUDA device to complete.
784
+
785
+ Args:
786
+ device (torch.device or int, optional): device for which to synchronize.
787
+ It uses the current device, given by :func:`~torch.cuda.current_device`,
788
+ if :attr:`device` is ``None`` (default).
789
+ """
790
+ _lazy_init()
791
+ with torch.cuda.device(device):
792
+ return torch._C._cuda_synchronize()
793
+
794
+
795
+ def ipc_collect():
796
+ r"""Force collects GPU memory after it has been released by CUDA IPC.
797
+
798
+ .. note::
799
+ Checks if any sent CUDA tensors could be cleaned from the memory. Force
800
+ closes the shared memory file used for reference counting if there are no
801
+ active counters. Useful when the producer process stopped actively sending
802
+ tensors and you want to release unused memory.
803
+ """
804
+ _lazy_init()
805
+ return torch._C._cuda_ipc_collect()
806
+
807
+
808
+ def current_stream(device: Optional[_device_t] = None) -> Stream:
809
+ r"""Return the currently selected :class:`Stream` for a given device.
810
+
811
+ Args:
812
+ device (torch.device or int, optional): selected device. Returns
813
+ the currently selected :class:`Stream` for the current device, given
814
+ by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
815
+ (default).
816
+ """
817
+ _lazy_init()
818
+ streamdata = torch._C._cuda_getCurrentStream(
819
+ _get_device_index(device, optional=True)
820
+ )
821
+ return Stream(
822
+ stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2]
823
+ )
824
+
825
+
826
+ def default_stream(device: Optional[_device_t] = None) -> Stream:
827
+ r"""Return the default :class:`Stream` for a given device.
828
+
829
+ Args:
830
+ device (torch.device or int, optional): selected device. Returns
831
+ the default :class:`Stream` for the current device, given by
832
+ :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
833
+ (default).
834
+ """
835
+ _lazy_init()
836
+ streamdata = torch._C._cuda_getDefaultStream(
837
+ _get_device_index(device, optional=True)
838
+ )
839
+ return Stream(
840
+ stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2]
841
+ )
842
+
843
+
844
+ def current_blas_handle():
845
+ r"""Return cublasHandle_t pointer to current cuBLAS handle"""
846
+ _lazy_init()
847
+ return torch._C._cuda_getCurrentBlasHandle()
848
+
849
+
850
+ def set_sync_debug_mode(debug_mode: Union[int, str]) -> None:
851
+ r"""Set the debug mode for cuda synchronizing operations.
852
+
853
+ Args:
854
+ debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations,
855
+ if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations.
856
+
857
+ Warning:
858
+ This is an experimental feature, and not all synchronizing operations will trigger warning or error. In
859
+ particular, operations in torch.distributed and torch.sparse namespaces are not covered yet.
860
+ """
861
+ _lazy_init()
862
+ if isinstance(debug_mode, str):
863
+ if debug_mode == "default":
864
+ debug_mode = 0
865
+ elif debug_mode == "warn":
866
+ debug_mode = 1
867
+ elif debug_mode == "error":
868
+ debug_mode = 2
869
+ else:
870
+ raise RuntimeError(
871
+ "invalid value of debug_mode, expected one of `default`, `warn`, `error`"
872
+ )
873
+
874
+ torch._C._cuda_set_sync_debug_mode(debug_mode)
875
+
876
+
877
+ def get_sync_debug_mode() -> int:
878
+ r"""Return current value of debug mode for cuda synchronizing operations."""
879
+ _lazy_init()
880
+ return torch._C._cuda_get_sync_debug_mode()
881
+
882
+
883
+ def _get_pynvml_handler(device: Optional[Union[Device, int]] = None):
884
+ if not _HAS_PYNVML:
885
+ raise ModuleNotFoundError(
886
+ "pynvml does not seem to be installed or it can't be imported."
887
+ ) from _PYNVML_ERR
888
+ from pynvml import NVMLError_DriverNotLoaded
889
+
890
+ try:
891
+ pynvml.nvmlInit()
892
+ except NVMLError_DriverNotLoaded as e:
893
+ raise RuntimeError("cuda driver can't be loaded, is cuda enabled?") from e
894
+
895
+ device = _get_nvml_device_index(device)
896
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
897
+ return handle
898
+
899
+
900
+ def memory_usage(device: Optional[Union[Device, int]] = None) -> int:
901
+ r"""Return the percent of time over the past sample period during which global (device)
902
+ memory was being read or written as given by `nvidia-smi`.
903
+
904
+ Args:
905
+ device (torch.device or int, optional): selected device. Returns
906
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
907
+ if :attr:`device` is ``None`` (default).
908
+
909
+ Warning: Each sample period may be between 1 second and 1/6 second,
910
+ depending on the product being queried.
911
+ """
912
+ handle = _get_pynvml_handler()
913
+
914
+ device = _get_nvml_device_index(device)
915
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
916
+ return pynvml.nvmlDeviceGetUtilizationRates(handle).memory
917
+
918
+
919
+ def utilization(device: Optional[Union[Device, int]] = None) -> int:
920
+ r"""Return the percent of time over the past sample period during which one or
921
+ more kernels was executing on the GPU as given by `nvidia-smi`.
922
+
923
+ Args:
924
+ device (torch.device or int, optional): selected device. Returns
925
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
926
+ if :attr:`device` is ``None`` (default).
927
+
928
+ Warning: Each sample period may be between 1 second and 1/6 second,
929
+ depending on the product being queried.
930
+ """
931
+ handle = _get_pynvml_handler(device)
932
+ device = _get_nvml_device_index(device)
933
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
934
+ return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu
935
+
936
+
937
+ def temperature(device: Optional[Union[Device, int]] = None) -> int:
938
+ r"""Return the average temperature of the GPU sensor in degrees Celsius.
939
+
940
+ The average temperature is computed based on past sample period as given by `nvidia-smi`.
941
+
942
+ Args:
943
+ device (torch.device or int, optional): selected device. Returns
944
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
945
+ if :attr:`device` is ``None`` (default).
946
+
947
+ Warning: Each sample period may be between 1 second and 1/6 second,
948
+ depending on the product being queried.
949
+ """
950
+ handle = _get_pynvml_handler(device)
951
+ # 0 refers to the temperature sensor for the GPU die.
952
+ return pynvml.nvmlDeviceGetTemperature(handle, 0)
953
+
954
+
955
+ def power_draw(device: Optional[Union[Device, int]] = None) -> int:
956
+ r"""Return the average power draw of the GPU sensor in mW (MilliWatts)
957
+ over the past sample period as given by `nvidia-smi` for Fermi or newer fully supported devices.
958
+
959
+ Args:
960
+ device (torch.device or int, optional): selected device. Returns
961
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
962
+ if :attr:`device` is ``None`` (default).
963
+
964
+ Warning: Each sample period may be between 1 second and 1/6 second,
965
+ depending on the product being queried.
966
+ """
967
+ handle = _get_pynvml_handler(device)
968
+ return pynvml.nvmlDeviceGetPowerUsage(handle)
969
+
970
+
971
+ def clock_rate(device: Optional[Union[Device, int]] = None) -> int:
972
+ r"""Return the clock speed of the GPU SM in Hz over the past sample period as given by `nvidia-smi`.
973
+
974
+ Args:
975
+ device (torch.device or int, optional): selected device. Returns
976
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
977
+ if :attr:`device` is ``None`` (default).
978
+
979
+ Warning: Each sample period may be between 1 second and 1/6 second,
980
+ depending on the product being queried.
981
+ """
982
+ handle = _get_pynvml_handler(device)
983
+ return pynvml.nvmlDeviceGetClockInfo(handle, 1)
984
+
985
+
986
+ def _get_device(device: Union[int, str, torch.device]) -> torch.device:
987
+ r"""Return the torch.device type object from the passed in device.
988
+
989
+ Args:
990
+ device (torch.device or int): selected device.
991
+ """
992
+ if isinstance(device, str):
993
+ device = torch.device(device)
994
+ elif isinstance(device, int):
995
+ device = torch.device("cuda", device)
996
+ return device
997
+
998
+
999
+ def _get_generator(device: torch.device) -> torch._C.Generator:
1000
+ r"""Return the CUDA Generator object for the given device.
1001
+
1002
+ Args:
1003
+ device (torch.device): selected device.
1004
+ """
1005
+ idx = device.index
1006
+ if idx is None:
1007
+ idx = current_device()
1008
+ return torch.cuda.default_generators[idx]
1009
+
1010
+
1011
+ def _set_rng_state_offset(
1012
+ offset: int, device: Union[int, str, torch.device] = "cuda"
1013
+ ) -> None:
1014
+ r"""Set the random number generator state offset of the specified GPU.
1015
+
1016
+ Args:
1017
+ offset (int): The desired offset
1018
+ device (torch.device or int, optional): The device to set the RNG state.
1019
+ Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
1020
+ """
1021
+ final_device = _get_device(device)
1022
+
1023
+ def cb():
1024
+ default_generator = _get_generator(final_device)
1025
+ default_generator.set_offset(offset)
1026
+
1027
+ _lazy_call(cb)
1028
+
1029
+
1030
+ def _get_rng_state_offset(device: Union[int, str, torch.device] = "cuda") -> int:
1031
+ r"""Return the random number generator state offset of the specified GPU.
1032
+
1033
+ Args:
1034
+ device (torch.device or int, optional): The device to return the RNG state offset of.
1035
+ Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
1036
+
1037
+ .. warning::
1038
+ This function eagerly initializes CUDA.
1039
+ """
1040
+ _lazy_init()
1041
+ final_device = _get_device(device)
1042
+ default_generator = _get_generator(final_device)
1043
+ return default_generator.get_offset()
1044
+
1045
+
1046
+ from .memory import * # noqa: F403
1047
+
1048
+
1049
+ from .random import * # noqa: F403
1050
+
1051
+ ################################################################################
1052
+ # Define Storage and Tensor classes
1053
+ ################################################################################
1054
+
1055
+
1056
+ @staticmethod # type: ignore[misc]
1057
+ def _lazy_new(cls, *args, **kwargs):
1058
+ _lazy_init()
1059
+ # We may need to call lazy init again if we are a forked child
1060
+ # del _CudaBase.__new__
1061
+ return super(_CudaBase, cls).__new__(cls, *args, **kwargs)
1062
+
1063
+
1064
+ class _CudaBase:
1065
+ is_cuda = True
1066
+ is_sparse = False
1067
+
1068
+ def type(self, *args, **kwargs):
1069
+ # We could use a Protocol here to tell mypy that self has `get_device` method
1070
+ # but it is only available in the typing module on Python >= 3.8
1071
+ # or on typing_extensions module on Python >= 3.6
1072
+ with device(self.get_device()): # type: ignore[attr-defined]
1073
+ return super().type(*args, **kwargs) # type: ignore[misc]
1074
+
1075
+ __new__ = _lazy_new
1076
+
1077
+
1078
+ from torch.storage import _LegacyStorage, _warn_typed_storage_removal
1079
+
1080
+
1081
+ class _CudaLegacyStorage(_LegacyStorage):
1082
+ @classmethod
1083
+ def from_buffer(cls, *args, **kwargs):
1084
+ _warn_typed_storage_removal()
1085
+ raise RuntimeError("from_buffer: Not available for CUDA storage")
1086
+
1087
+ @classmethod
1088
+ def _new_with_weak_ptr(cls, *args, **kwargs):
1089
+ raise RuntimeError("_new_with_weak_ptr: Not available for CUDA storage")
1090
+
1091
+ @classmethod
1092
+ def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None):
1093
+ raise RuntimeError("_new_shared_filename: Not available for CUDA storage")
1094
+
1095
+
1096
+ class ByteStorage(_CudaLegacyStorage):
1097
+ @classproperty
1098
+ def dtype(self):
1099
+ _warn_typed_storage_removal()
1100
+ return self._dtype
1101
+
1102
+ @classproperty
1103
+ def _dtype(self):
1104
+ return torch.uint8
1105
+
1106
+
1107
+ class DoubleStorage(_CudaLegacyStorage):
1108
+ @classproperty
1109
+ def dtype(self):
1110
+ _warn_typed_storage_removal()
1111
+ return self._dtype
1112
+
1113
+ @classproperty
1114
+ def _dtype(self):
1115
+ return torch.double
1116
+
1117
+
1118
+ class FloatStorage(_CudaLegacyStorage):
1119
+ @classproperty
1120
+ def dtype(self):
1121
+ _warn_typed_storage_removal()
1122
+ return self._dtype
1123
+
1124
+ @classproperty
1125
+ def _dtype(self):
1126
+ return torch.float
1127
+
1128
+
1129
+ class HalfStorage(_CudaLegacyStorage):
1130
+ @classproperty
1131
+ def dtype(self):
1132
+ _warn_typed_storage_removal()
1133
+ return self._dtype
1134
+
1135
+ @classproperty
1136
+ def _dtype(self):
1137
+ return torch.half
1138
+
1139
+
1140
+ class LongStorage(_CudaLegacyStorage):
1141
+ @classproperty
1142
+ def dtype(self):
1143
+ _warn_typed_storage_removal()
1144
+ return self._dtype
1145
+
1146
+ @classproperty
1147
+ def _dtype(self):
1148
+ return torch.long
1149
+
1150
+
1151
+ class IntStorage(_CudaLegacyStorage):
1152
+ @classproperty
1153
+ def dtype(self):
1154
+ _warn_typed_storage_removal()
1155
+ return self._dtype
1156
+
1157
+ @classproperty
1158
+ def _dtype(self):
1159
+ return torch.int
1160
+
1161
+
1162
+ class ShortStorage(_CudaLegacyStorage):
1163
+ @classproperty
1164
+ def dtype(self):
1165
+ _warn_typed_storage_removal()
1166
+ return self._dtype
1167
+
1168
+ @classproperty
1169
+ def _dtype(self):
1170
+ return torch.short
1171
+
1172
+
1173
+ class CharStorage(_CudaLegacyStorage):
1174
+ @classproperty
1175
+ def dtype(self):
1176
+ _warn_typed_storage_removal()
1177
+ return self._dtype
1178
+
1179
+ @classproperty
1180
+ def _dtype(self):
1181
+ return torch.int8
1182
+
1183
+
1184
+ class BoolStorage(_CudaLegacyStorage):
1185
+ @classproperty
1186
+ def dtype(self):
1187
+ _warn_typed_storage_removal()
1188
+ return self._dtype
1189
+
1190
+ @classproperty
1191
+ def _dtype(self):
1192
+ return torch.bool
1193
+
1194
+
1195
+ class BFloat16Storage(_CudaLegacyStorage):
1196
+ @classproperty
1197
+ def dtype(self):
1198
+ _warn_typed_storage_removal()
1199
+ return self._dtype
1200
+
1201
+ @classproperty
1202
+ def _dtype(self):
1203
+ return torch.bfloat16
1204
+
1205
+
1206
+ class ComplexDoubleStorage(_CudaLegacyStorage):
1207
+ @classproperty
1208
+ def dtype(self):
1209
+ _warn_typed_storage_removal()
1210
+ return self._dtype
1211
+
1212
+ @classproperty
1213
+ def _dtype(self):
1214
+ return torch.cdouble
1215
+
1216
+
1217
+ class ComplexFloatStorage(_CudaLegacyStorage):
1218
+ @classproperty
1219
+ def dtype(self):
1220
+ _warn_typed_storage_removal()
1221
+ return self._dtype
1222
+
1223
+ @classproperty
1224
+ def _dtype(self):
1225
+ return torch.cfloat
1226
+
1227
+
1228
+ del _LegacyStorage
1229
+ del _CudaLegacyStorage
1230
+
1231
+ torch._storage_classes.add(DoubleStorage)
1232
+ torch._storage_classes.add(FloatStorage)
1233
+ torch._storage_classes.add(LongStorage)
1234
+ torch._storage_classes.add(IntStorage)
1235
+ torch._storage_classes.add(ShortStorage)
1236
+ torch._storage_classes.add(CharStorage)
1237
+ torch._storage_classes.add(ByteStorage)
1238
+ torch._storage_classes.add(HalfStorage)
1239
+ torch._storage_classes.add(BoolStorage)
1240
+ torch._storage_classes.add(BFloat16Storage)
1241
+ torch._storage_classes.add(ComplexDoubleStorage)
1242
+ torch._storage_classes.add(ComplexFloatStorage)
1243
+
1244
+
1245
+ class _WrappedTritonKernel:
1246
+ """Just a simple wrapper to store some metadata for testing purposes."""
1247
+
1248
+ def __init__(self, kernel):
1249
+ self.kernel = kernel
1250
+ self.kernel_invoked = False
1251
+
1252
+ def __call__(self, *args, **kwargs):
1253
+ res = self.kernel(*args, **kwargs)
1254
+ self.kernel_invoked = True
1255
+ return res
1256
+
1257
+
1258
+ def _register_triton_kernels():
1259
+ if torch._running_with_deploy():
1260
+ return
1261
+
1262
+ @_WrappedTritonKernel
1263
+ def kernel_impl(*args, **kwargs):
1264
+ from torch.sparse._triton_ops import bsr_dense_mm
1265
+
1266
+ return bsr_dense_mm(*args, skip_checks=True, **kwargs)
1267
+
1268
+ @_WrappedTritonKernel
1269
+ def addmm_kernel_impl(*args, **kwargs):
1270
+ from torch.sparse._triton_ops import bsr_dense_addmm
1271
+
1272
+ return bsr_dense_addmm(*args, skip_checks=True, **kwargs)
1273
+
1274
+ has_triton = importlib.util.find_spec("triton") is not None
1275
+ if has_triton:
1276
+ torch._TritonLibrary.registerOp(
1277
+ "_triton_bsr_dense_mm_out",
1278
+ "_triton_bsr_dense_mm_out(Tensor bsr, Tensor dense, *, Tensor(a!) out) -> Tensor(a!)",
1279
+ kernel_impl,
1280
+ "SparseCsrCUDA",
1281
+ )
1282
+
1283
+ torch._TritonLibrary.registerOp(
1284
+ "_triton_bsr_dense_addmm_out",
1285
+ (
1286
+ "_triton_bsr_dense_addmm_out(Tensor input, Tensor bsr, Tensor dense,"
1287
+ " *, Scalar beta, Scalar alpha, Tensor(a!) out) -> Tensor(a!)"
1288
+ ),
1289
+ addmm_kernel_impl,
1290
+ "SparseCsrCUDA",
1291
+ )
1292
+
1293
+
1294
+ _lazy_call(_register_triton_kernels)
1295
+
1296
+
1297
+ from . import amp, jiterator, nvtx, profiler, sparse
1298
+
1299
+ __all__ = [
1300
+ # Typed storage and tensors
1301
+ "BFloat16Storage",
1302
+ "BFloat16Tensor",
1303
+ "BoolStorage",
1304
+ "BoolTensor",
1305
+ "ByteStorage",
1306
+ "ByteTensor",
1307
+ "CharStorage",
1308
+ "CharTensor",
1309
+ "ComplexDoubleStorage",
1310
+ "ComplexFloatStorage",
1311
+ "DoubleStorage",
1312
+ "DoubleTensor",
1313
+ "FloatStorage",
1314
+ "FloatTensor",
1315
+ "HalfStorage",
1316
+ "HalfTensor",
1317
+ "IntStorage",
1318
+ "IntTensor",
1319
+ "LongStorage",
1320
+ "LongTensor",
1321
+ "ShortStorage",
1322
+ "ShortTensor",
1323
+ "CUDAGraph",
1324
+ "CudaError",
1325
+ "DeferredCudaCallError",
1326
+ "Event",
1327
+ "ExternalStream",
1328
+ "OutOfMemoryError",
1329
+ "Stream",
1330
+ "StreamContext",
1331
+ "amp",
1332
+ "caching_allocator_alloc",
1333
+ "caching_allocator_delete",
1334
+ "can_device_access_peer",
1335
+ "check_error",
1336
+ "cudaStatus",
1337
+ "cudart",
1338
+ "current_blas_handle",
1339
+ "current_device",
1340
+ "current_stream",
1341
+ "default_generators",
1342
+ "default_stream",
1343
+ "device",
1344
+ "device_count",
1345
+ "device_of",
1346
+ "empty_cache",
1347
+ "get_allocator_backend",
1348
+ "CUDAPluggableAllocator",
1349
+ "change_current_allocator",
1350
+ "get_arch_list",
1351
+ "get_device_capability",
1352
+ "get_device_name",
1353
+ "get_device_properties",
1354
+ "get_gencode_flags",
1355
+ "get_rng_state",
1356
+ "get_rng_state_all",
1357
+ "get_sync_debug_mode",
1358
+ "graph",
1359
+ "graph_pool_handle",
1360
+ "graphs",
1361
+ "has_half",
1362
+ "has_magma",
1363
+ "init",
1364
+ "initial_seed",
1365
+ "ipc_collect",
1366
+ "is_available",
1367
+ "is_bf16_supported",
1368
+ "is_current_stream_capturing",
1369
+ "is_initialized",
1370
+ "jiterator",
1371
+ "list_gpu_processes",
1372
+ "make_graphed_callables",
1373
+ "manual_seed",
1374
+ "manual_seed_all",
1375
+ "max_memory_allocated",
1376
+ "max_memory_cached",
1377
+ "max_memory_reserved",
1378
+ "mem_get_info",
1379
+ "memory",
1380
+ "memory_allocated",
1381
+ "memory_cached",
1382
+ "memory_reserved",
1383
+ "memory_snapshot",
1384
+ "memory_stats",
1385
+ "memory_stats_as_nested_dict",
1386
+ "memory_summary",
1387
+ "memory_usage",
1388
+ "temperature",
1389
+ "power_draw",
1390
+ "clock_rate",
1391
+ "nccl",
1392
+ "nvtx",
1393
+ "profiler",
1394
+ "random",
1395
+ "reset_accumulated_memory_stats",
1396
+ "reset_max_memory_allocated",
1397
+ "reset_max_memory_cached",
1398
+ "reset_peak_memory_stats",
1399
+ "seed",
1400
+ "seed_all",
1401
+ "set_device",
1402
+ "set_per_process_memory_fraction",
1403
+ "set_rng_state",
1404
+ "set_rng_state_all",
1405
+ "set_stream",
1406
+ "set_sync_debug_mode",
1407
+ "sparse",
1408
+ "stream",
1409
+ "streams",
1410
+ "synchronize",
1411
+ "utilization",
1412
+ ]
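torch/cuda/__init__.py above lazily initializes the CUDA state and exposes device, stream, and query helpers. A short illustrative usage of the API it defines (not part of the commit):

import torch

if torch.cuda.is_available():                 # safe to call; never raises
    with torch.cuda.device(0):                # select device 0 for this block
        side = torch.cuda.Stream()            # a non-default stream
        with torch.cuda.stream(side):         # enqueue kernels on it
            x = torch.randn(1024, 1024, device="cuda")
            y = x @ x
        torch.cuda.synchronize()              # wait for all queued kernels
        print(torch.cuda.get_device_name(0), tuple(y.shape))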
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (40.8 kB).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc ADDED
Binary file (20.8 kB).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc ADDED
Binary file (22.3 kB).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.54 kB).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc ADDED
Binary file (398 Bytes).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc ADDED
Binary file (175 Bytes).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc ADDED
Binary file (19 kB).
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc ADDED
Binary file (6.42 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc ADDED
Binary file (34 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc ADDED
Binary file (3.68 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc ADDED
Binary file (3.02 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc ADDED
Binary file (5.54 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (176 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc ADDED
Binary file (9.61 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/amp/__init__.py ADDED
@@ -0,0 +1,11 @@

1
+ from .autocast_mode import autocast, custom_bwd, custom_fwd
2
+ from .common import amp_definitely_not_available
3
+ from .grad_scaler import GradScaler
4
+
5
+ __all__ = [
6
+ "amp_definitely_not_available",
7
+ "autocast",
8
+ "custom_bwd",
9
+ "custom_fwd",
10
+ "GradScaler",
11
+ ]
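Taken together, the exports above cover the usual CUDA mixed-precision recipe. Below is a minimal training-step sketch using `autocast` and `GradScaler`; the model, optimizer, and data are placeholders chosen only to make the snippet runnable.

```python
import torch

# Placeholder model/optimizer/data, just to show the autocast + GradScaler pattern.
model = torch.nn.Linear(16, 4).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()

inputs = torch.randn(8, 16, device="cuda")
targets = torch.randn(8, 4, device="cuda")

for _ in range(3):
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast():              # ops run in float16 where safe
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
    scaler.scale(loss).backward()                # scale loss to avoid grad underflow
    scaler.step(optimizer)                       # unscales grads, then steps
    scaler.update()                              # adjusts the scale factor
```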
venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (421 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc ADDED
Binary file (439 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc ADDED
Binary file (1.09 kB). View file
 
venv/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py ADDED
@@ -0,0 +1,144 @@
1
+ import collections
2
+ import functools
3
+
4
+ import torch
5
+
6
+ try:
7
+ import numpy as np
8
+
9
+ HAS_NUMPY = True
10
+ except ModuleNotFoundError:
11
+ np = None # type: ignore[assignment]
12
+ from typing import Any
13
+
14
+ __all__ = ["autocast", "custom_fwd", "custom_bwd"]
15
+
16
+
17
+ class autocast(torch.amp.autocast_mode.autocast):
18
+ r"""See :class:`torch.autocast`.
19
+
20
+ ``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)``
21
+ """
22
+
23
+ def __init__(
24
+ self,
25
+ enabled: bool = True,
26
+ dtype: torch.dtype = torch.float16,
27
+ cache_enabled: bool = True,
28
+ ):
29
+ if torch._jit_internal.is_scripting():
30
+ self._enabled = enabled
31
+ self.device = "cuda"
32
+ self.fast_dtype = dtype
33
+ return
34
+ super().__init__(
35
+ "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
36
+ )
37
+
38
+ def __enter__(self):
39
+ if torch._jit_internal.is_scripting():
40
+ return self
41
+ return super().__enter__()
42
+
43
+ # TODO: discuss a unified TorchScript-friendly API for autocast
44
+ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]
45
+ if torch._jit_internal.is_scripting():
46
+ return
47
+ return super().__exit__(exc_type, exc_val, exc_tb)
48
+
49
+ def __call__(self, func):
50
+ if torch._jit_internal.is_scripting():
51
+ return func
52
+ return super().__call__(func)
53
+
54
+
55
+ # Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which
56
+ # may be falsely detected as "Iterables."
57
+ def _cast(value, dtype):
58
+ if isinstance(value, torch.Tensor):
59
+ is_eligible = (
60
+ value.is_floating_point()
61
+ and value.is_cuda
62
+ and (value.dtype is not torch.float64)
63
+ )
64
+ return value.to(dtype) if is_eligible else value
65
+ elif isinstance(value, (str, bytes)):
66
+ return value
67
+ elif HAS_NUMPY and isinstance(value, np.ndarray):
68
+ return value
69
+ elif isinstance(value, collections.abc.Mapping):
70
+ return {_cast(k, dtype): _cast(v, dtype) for k, v in value.items()}
71
+ elif isinstance(value, collections.abc.Iterable):
72
+ iterable = (_cast(v, dtype) for v in value)
73
+ if isinstance(value, (list, tuple)):
74
+ return type(value)(iterable)
75
+ else:
76
+ return iterable
77
+ else:
78
+ return value
79
+
80
+
81
+ # custom_fwd is a decorator that may or may not be used with arguments, following
82
+ # https://github.com/dabeaz/python-cookbook/tree/master/src/9/defining_a_decorator_that_takes_an_optional_argument.
83
+ # this works:
84
+ # @custom_fwd
85
+ # def forward(...):
86
+ # this also works:
87
+ # @custom_fwd(cast_inputs=torch.float)
88
+ # def forward(...):
89
+ def custom_fwd(fwd=None, *, cast_inputs=None):
90
+ """
91
+ Create a helper decorator for ``forward`` methods of custom autograd functions.
92
+
93
+ Autograd functions are subclasses of :class:`torch.autograd.Function`.
94
+ See the :ref:`example page<amp-custom-examples>` for more detail.
95
+
96
+ Args:
97
+ cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``,
98
+ when ``forward`` runs in an autocast-enabled region, casts incoming
99
+ floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected),
100
+ then executes ``forward`` with autocast disabled.
101
+ If ``None``, ``forward``'s internal ops execute with the current autocast state.
102
+
103
+ .. note::
104
+ If the decorated ``forward`` is called outside an autocast-enabled region,
105
+ :func:`custom_fwd<custom_fwd>` is a no-op and ``cast_inputs`` has no effect.
106
+ """
107
+ if fwd is None:
108
+ return functools.partial(custom_fwd, cast_inputs=cast_inputs)
109
+
110
+ @functools.wraps(fwd)
111
+ def decorate_fwd(*args, **kwargs):
112
+ args[0]._dtype = torch.get_autocast_gpu_dtype()
113
+ if cast_inputs is None:
114
+ args[0]._fwd_used_autocast = torch.is_autocast_enabled()
115
+ return fwd(*args, **kwargs)
116
+ else:
117
+ autocast_context = torch.is_autocast_enabled()
118
+ args[0]._fwd_used_autocast = False
119
+ if autocast_context:
120
+ with autocast(enabled=False):
121
+ return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs))
122
+ else:
123
+ return fwd(*args, **kwargs)
124
+
125
+ return decorate_fwd
126
+
127
+
128
+ # Autograd ensures incoming gradients are the same type as forward outputs. Allowing a separate
129
+ # cast_inputs argument on custom_bwd is unnecessary and could cause errors if it doesn't match
130
+ # cast_inputs supplied to custom_fwd.
131
+ def custom_bwd(bwd):
132
+ """Create a helper decorator for backward methods of custom autograd functions.
133
+
134
+ Autograd functions are subclasses of :class:`torch.autograd.Function`.
135
+ Ensures that ``backward`` executes with the same autocast state as ``forward``.
136
+ See the :ref:`example page<amp-custom-examples>` for more detail.
137
+ """
138
+
139
+ @functools.wraps(bwd)
140
+ def decorate_bwd(*args, **kwargs):
141
+ with autocast(enabled=args[0]._fwd_used_autocast, dtype=args[0]._dtype):
142
+ return bwd(*args, **kwargs)
143
+
144
+ return decorate_bwd
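The `custom_fwd`/`custom_bwd` decorators above are intended for `torch.autograd.Function` subclasses whose `forward`/`backward` should behave predictably under autocast. A small illustrative subclass (the op itself is arbitrary and only a sketch) might look like:

```python
import torch
from torch.cuda.amp import custom_fwd, custom_bwd

class ScaledSum(torch.autograd.Function):
    # Toy op: sum(x) * alpha, written to be autocast-safe.
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)   # run forward in fp32 even under autocast
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        ctx.save_for_backward(torch.ones_like(x))
        return x.sum() * alpha

    @staticmethod
    @custom_bwd                              # backward reuses forward's autocast state
    def backward(ctx, grad_out):
        (ones,) = ctx.saved_tensors
        return grad_out * ctx.alpha * ones, None

x = torch.randn(4, device="cuda", requires_grad=True)
with torch.cuda.amp.autocast():
    y = ScaledSum.apply(x, 2.0)
y.backward()
```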
venv/lib/python3.10/site-packages/torch/cuda/amp/common.py ADDED
@@ -0,0 +1,9 @@
1
+ from importlib.util import find_spec
2
+
3
+ import torch
4
+
5
+ __all__ = ["amp_definitely_not_available"]
6
+
7
+
8
+ def amp_definitely_not_available():
9
+ return not (torch.cuda.is_available() or find_spec("torch_xla"))
venv/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py ADDED
@@ -0,0 +1,28 @@
1
+ import torch
2
+ from torch.amp.grad_scaler import OptState
3
+
4
+ __all__ = ["GradScaler", "OptState"]
5
+
6
+
7
+ class GradScaler(torch.amp.GradScaler):
8
+ r"""
9
+ See :class:`torch.amp.GradScaler`.
10
+ ``torch.cuda.amp.GradScaler(args...)`` is equivalent to ``torch.amp.GradScaler("cuda", args...)``
11
+ """
12
+
13
+ def __init__(
14
+ self,
15
+ init_scale: float = 2.0**16,
16
+ growth_factor: float = 2.0,
17
+ backoff_factor: float = 0.5,
18
+ growth_interval: int = 2000,
19
+ enabled: bool = True,
20
+ ) -> None:
21
+ super().__init__(
22
+ "cuda",
23
+ init_scale=init_scale,
24
+ growth_factor=growth_factor,
25
+ backoff_factor=backoff_factor,
26
+ growth_interval=growth_interval,
27
+ enabled=enabled,
28
+ )
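Since this class simply forwards to the device-generic scaler, the two spellings below are interchangeable; the CUDA-specific one only pins the device argument. (Sketch only.)

```python
import torch

# Equivalent constructions of a gradient scaler for CUDA:
scaler_a = torch.cuda.amp.GradScaler(init_scale=2.0**16)
scaler_b = torch.amp.GradScaler("cuda", init_scale=2.0**16)
```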
venv/lib/python3.10/site-packages/torch/cuda/jiterator.py ADDED
@@ -0,0 +1,185 @@
1
+ import re
2
+ from typing import Callable, List
3
+
4
+ import torch
5
+ from torch import Tensor
6
+
7
+ __all__: List[str] = []
8
+
9
+
10
+ class _CodeParser:
11
+ def __init__(self, code_string: str):
12
+ optional_ws = r"\s*"
13
+ required_ws = r"\s+"
14
+ template_params = r"(?P<template_params>\<.+\>)"
15
+ return_type = r"(?P<return_type>\w+)"
16
+ function_name = r"(?P<function_name>\w+)"
17
+ function_params = r"(?P<function_params>\(.+\))"
18
+ function_body = r"(?P<function_body>\{.+\})"
19
+
20
+ pattern = (
21
+ optional_ws
22
+ + "template"
23
+ + optional_ws
24
+ + template_params
25
+ + optional_ws
26
+ + return_type
27
+ + required_ws
28
+ + function_name
29
+ + optional_ws
30
+ + function_params
31
+ + optional_ws
32
+ + function_body
33
+ + optional_ws
34
+ )
35
+
36
+ result = re.match(
37
+ pattern, code_string, re.DOTALL
38
+ ) # DOTALL for matching multiline
39
+
40
+ if result is None:
41
+ raise Exception(
42
+ f"Couldn't parse code, please check correctness:\n {code_string}"
43
+ )
44
+
45
+ self.template_params = result["template_params"]
46
+ self.return_type = result["return_type"]
47
+ self.function_name = result["function_name"]
48
+ self.function_params = result["function_params"]
49
+ self.function_body = result["function_body"]
50
+
51
+
52
+ class _JittedFunction:
53
+ def __init__(
54
+ self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs
55
+ ):
56
+ self.code_string = code_string
57
+
58
+ assert (
59
+ return_by_ref or num_outputs == 1
60
+ ), "Return by value only works for single output. "
61
+ self.return_by_ref = return_by_ref
62
+ self.num_outputs = num_outputs
63
+
64
+ parsed_code = _CodeParser(code_string)
65
+ self.kernel_name = parsed_code.function_name
66
+
67
+ self.kwargs_dict = kwargs
68
+ self.is_cuda_available = torch.cuda.is_available()
69
+
70
+ def __call__(self, *tensors: Tensor, **kwargs):
71
+ # Jiterator follows torch.cuda's lazy initialization behavior
72
+ # Defer checking CUDA's availability to the time of function invocation
73
+ assert (
74
+ self.is_cuda_available
75
+ ), "Jiterator is only supported on CUDA and ROCm GPUs, none are available."
76
+
77
+ assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs."
78
+
79
+ expanded_kwargs = self.kwargs_dict.copy()
80
+ for key, value in kwargs.items():
81
+ if key in self.kwargs_dict:
82
+ expanded_kwargs[key] = value
83
+ else:
84
+ raise KeyError(f"{key} is not declared in function definition")
85
+
86
+ return torch._C._cuda_jiterator_compile_and_launch_kernel(
87
+ self.code_string,
88
+ self.kernel_name,
89
+ self.return_by_ref,
90
+ self.num_outputs,
91
+ tensors,
92
+ expanded_kwargs,
93
+ )
94
+
95
+
96
+ def _create_jit_fn(code_string: str, **kwargs) -> Callable:
97
+ """
98
+ Create a jiterator-generated cuda kernel for an elementwise op.
99
+
100
+ The code string has to be a valid CUDA function that describes the computation for a single element. The code
101
+ string has to follow the C++ template pattern, as shown in the example below. This function will be inlined
102
+ into the elementwise kernel template and compiled on the fly. The compiled kernel will be cached in memory, as well as
103
+ in a local temp dir.
104
+
105
+ Jiterator-generated kernels accept noncontiguous tensors, and support broadcasting and type promotion.
106
+
107
+ Args:
108
+ code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value.
109
+ kwargs (Dict, optional): Keyword arguments for generated function
110
+
111
+ Example::
112
+
113
+ code_string = "template <typename T> T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }"
114
+ jitted_fn = create_jit_fn(code_string, alpha=1.0)
115
+ a = torch.rand(3, device='cuda')
116
+ b = torch.rand(3, device='cuda')
117
+ # invoke jitted function like a regular python function
118
+ result = jitted_fn(a, b, alpha=3.14)
119
+
120
+ code_string also allows multiple function definitions, and the last function will be treated as the entry function.
121
+
122
+ Example::
123
+
124
+ code_string = "template <typename T> T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }"
125
+ code_string += "template <typename T> T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }"
126
+ jitted_fn = create_jit_fn(code_string, val=0.0)
127
+ a = torch.rand(3, device='cuda')
128
+ b = torch.rand(3, device='cuda')
129
+ # invoke jitted function like a regular python function
130
+ result = jitted_fn(a, b) # using default val=0.0
131
+
132
+ Jiterator can be used together with python registration to override an operator's cuda kernel.
133
+ Following example is overriding gelu's cuda kernel with relu.
134
+
135
+ Example::
136
+
137
+ code_string = "template <typename T> T my_gelu(T a) { return a > 0 ? a : 0; }"
138
+ my_gelu = create_jit_fn(code_string)
139
+ my_lib = torch.library.Library("aten", "IMPL")
140
+ my_lib.impl('aten::gelu', my_gelu, "CUDA")
141
+ # torch.nn.GELU and torch.nn.function.gelu are now overridden
142
+ a = torch.rand(3, device='cuda')
143
+ torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a))
144
+
145
+ .. warning::
146
+ This API is in beta and may change in future releases.
147
+
148
+ .. warning::
149
+ This API only supports up to 8 inputs and 1 output
150
+
151
+ .. warning::
152
+ All input tensors must be on a CUDA device
153
+ """
154
+ return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs)
155
+
156
+
157
+ def _create_multi_output_jit_fn(
158
+ code_string: str, num_outputs: int, **kwargs
159
+ ) -> Callable:
160
+ """
161
+ Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs.
162
+
163
+ Args:
164
+ code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return values by reference.
165
+ num_outputs (int): number of outputs returned by the kernel
166
+ kwargs (Dict, optional): Keyword arguments for generated function
167
+
168
+ Example::
169
+
170
+ code_string = "template <typename T> void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }"
171
+ jitted_fn = create_jit_fn(code_string, alpha=1.0)
172
+ a = torch.rand(3, device='cuda')
173
+ b = torch.rand(3, device='cuda')
174
+ # invoke jitted function like a regular python function
175
+ result = jitted_fn(a, b, alpha=3.14)
176
+
177
+ .. warning::
178
+ This API is in beta and may change in future releases.
179
+
180
+ .. warning::
181
+ This API only supports up to 8 inputs and 8 outputs
182
+ """
183
+ return _JittedFunction(
184
+ code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs
185
+ )
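To make the docstring examples above concrete, here is a short end-to-end sketch using the private `_create_jit_fn` and `_create_multi_output_jit_fn` helpers defined in this module. It assumes a CUDA or ROCm device, and these APIs are explicitly marked beta, so treat it as illustrative only.

```python
import torch
from torch.cuda.jiterator import _create_jit_fn, _create_multi_output_jit_fn

# Single-output kernel: the entry functor returns by value.
code = "template <typename T> T axpy(T x, T y, T alpha) { return alpha * x + y; }"
axpy = _create_jit_fn(code, alpha=2.0)

a = torch.rand(3, device="cuda")
b = torch.rand(3, device="cuda")
print(axpy(a, b))               # uses the declared default alpha=2.0
print(axpy(a, b, alpha=-1.0))   # overrides the keyword argument

# Two-output kernel: the entry functor returns by reference through T& parameters.
code2 = (
    "template <typename T> void sum_diff(T x, T y, T& s, T& d) "
    "{ s = x + y; d = x - y; }"
)
sum_diff = _create_multi_output_jit_fn(code2, num_outputs=2)
s, d = sum_diff(a, b)
```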
venv/lib/python3.10/site-packages/torch/cuda/memory.py ADDED
@@ -0,0 +1,914 @@
1
+ r"""This package adds support for device memory management implemented in CUDA."""
2
+
3
+ import collections
4
+ import contextlib
5
+ import ctypes
6
+ import pickle
7
+ import sys
8
+ import warnings
9
+ from inspect import signature
10
+
11
+ from typing import Any, Dict, Optional, Tuple, Union
12
+
13
+ import torch
14
+ from torch import _C
15
+
16
+ from torch.types import Device
17
+ from .._utils import _dummy_type
18
+ from . import _get_device_index, _get_nvml_device_index, _lazy_init, is_initialized
19
+
20
+ from ._memory_viz import memory as _memory, segments as _segments
21
+
22
+ __all__ = [
23
+ "caching_allocator_alloc",
24
+ "caching_allocator_delete",
25
+ "set_per_process_memory_fraction",
26
+ "empty_cache",
27
+ "memory_stats",
28
+ "memory_stats_as_nested_dict",
29
+ "reset_accumulated_memory_stats",
30
+ "reset_peak_memory_stats",
31
+ "reset_max_memory_allocated",
32
+ "reset_max_memory_cached",
33
+ "memory_allocated",
34
+ "max_memory_allocated",
35
+ "memory_reserved",
36
+ "max_memory_reserved",
37
+ "memory_cached",
38
+ "max_memory_cached",
39
+ "memory_snapshot",
40
+ "memory_summary",
41
+ "list_gpu_processes",
42
+ "mem_get_info",
43
+ "get_allocator_backend",
44
+ "CUDAPluggableAllocator",
45
+ "change_current_allocator",
46
+ ]
47
+
48
+
49
+ if not hasattr(torch._C, "_cuda_CUDAAllocator"):
50
+ # Define dummy base classes
51
+ torch._C.__dict__["_cuda_CUDAAllocator"] = _dummy_type("_cuda_CUDAAllocator")
52
+
53
+
54
+ def _host_allocator():
55
+ _lazy_init()
56
+ return torch._C._cuda_cudaHostAllocator()
57
+
58
+
59
+ @contextlib.contextmanager
60
+ def _free_mutex():
61
+ torch._C._cuda_lock_mutex()
62
+ try:
63
+ yield
64
+ finally:
65
+ torch._C._cuda_unlock_mutex()
66
+
67
+
68
+ def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None):
69
+ r"""Perform a memory allocation using the CUDA memory allocator.
70
+
71
+ Memory is allocated for a given device and a stream; this
72
+ function is intended to be used for interoperability with other
73
+ frameworks. Allocated memory is released through
74
+ :func:`~torch.cuda.caching_allocator_delete`.
75
+
76
+ Args:
77
+ size (int): number of bytes to be allocated.
78
+ device (torch.device or int, optional): selected device. If it is
79
+ ``None`` the default CUDA device is used.
80
+ stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then
81
+ the default stream for the selected device is used.
82
+
83
+ .. note::
84
+ See :ref:`cuda-memory-management` for more details about GPU memory
85
+ management.
86
+ """
87
+ if device is None:
88
+ device = torch.cuda.current_device()
89
+ device = _get_device_index(device)
90
+ if stream is None:
91
+ stream = torch.cuda.current_stream(device)
92
+ if isinstance(stream, torch.cuda.streams.Stream):
93
+ stream = stream.cuda_stream
94
+ if not isinstance(stream, int):
95
+ raise TypeError(
96
+ "Invalid type for stream argument, must be "
97
+ "`torch.cuda.Stream` or `int` representing a pointer "
98
+ "to a existing stream"
99
+ )
100
+ with torch.cuda.device(device):
101
+ return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)
102
+
103
+
104
+ def caching_allocator_delete(mem_ptr):
105
+ r"""Delete memory allocated using the CUDA memory allocator.
106
+
107
+ Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`
108
+ is freed here. The associated device and stream are tracked inside
109
+ the allocator.
110
+
111
+ Args:
112
+ mem_ptr (int): memory address to be freed by the allocator.
113
+
114
+ .. note::
115
+ See :ref:`cuda-memory-management` for more details about GPU memory
116
+ management.
117
+ """
118
+ torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)
119
+
120
+
121
+ def set_per_process_memory_fraction(
122
+ fraction, device: Union[Device, int] = None
123
+ ) -> None:
124
+ r"""Set memory fraction for a process.
125
+
126
+ The fraction is used to limit how much memory the caching allocator may allocate on a CUDA device.
127
+ The allowed value equals the total visible memory multiplied by the fraction.
128
+ If a process tries to allocate more than the allowed value, the allocator will raise an out of
129
+ memory error.
130
+
131
+ Args:
132
+ fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction.
133
+ device (torch.device or int, optional): selected device. If it is
134
+ ``None`` the default CUDA device is used.
135
+ .. note::
136
+ In general, the total available free memory is less than the total capacity.
137
+ """
138
+ _lazy_init()
139
+ if device is None:
140
+ device = torch.cuda.current_device()
141
+ device = _get_device_index(device)
142
+ if not isinstance(fraction, float):
143
+ raise TypeError("Invalid type for fraction argument, must be `float`")
144
+ if fraction < 0 or fraction > 1:
145
+ raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~1")
146
+
147
+ torch._C._cuda_setMemoryFraction(fraction, device)
148
+
149
+
150
+ def empty_cache() -> None:
151
+ r"""Release all unoccupied cached memory currently held by the caching
152
+ allocator so that it can be used by other GPU applications and is visible in
153
+ `nvidia-smi`.
154
+
155
+ .. note::
156
+ :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
157
+ memory available for PyTorch. However, it may help reduce fragmentation
158
+ of GPU memory in certain cases. See :ref:`cuda-memory-management` for
159
+ more details about GPU memory management.
160
+ """
161
+ if is_initialized():
162
+ torch._C._cuda_emptyCache()
163
+
164
+
165
+ def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]:
166
+ r"""Return a dictionary of CUDA memory allocator statistics for a given device.
167
+
168
+ The return value of this function is a dictionary of statistics, each of
169
+ which is a non-negative integer.
170
+
171
+ Core statistics:
172
+
173
+ - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
174
+ number of allocation requests received by the memory allocator.
175
+ - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
176
+ amount of allocated memory.
177
+ - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
178
+ number of reserved segments from ``cudaMalloc()``.
179
+ - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
180
+ amount of reserved memory.
181
+ - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
182
+ number of active memory blocks.
183
+ - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
184
+ amount of active memory.
185
+ - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
186
+ number of inactive, non-releasable memory blocks.
187
+ - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
188
+ amount of inactive, non-releasable memory.
189
+
190
+ For these core statistics, values are broken down as follows.
191
+
192
+ Pool type:
193
+
194
+ - ``all``: combined statistics across all memory pools.
195
+ - ``large_pool``: statistics for the large allocation pool
196
+ (as of October 2019, for size >= 1MB allocations).
197
+ - ``small_pool``: statistics for the small allocation pool
198
+ (as of October 2019, for size < 1MB allocations).
199
+
200
+ Metric type:
201
+
202
+ - ``current``: current value of this metric.
203
+ - ``peak``: maximum value of this metric.
204
+ - ``allocated``: historical total increase in this metric.
205
+ - ``freed``: historical total decrease in this metric.
206
+
207
+ In addition to the core statistics, we also provide some simple event
208
+ counters:
209
+
210
+ - ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
211
+ result in a cache flush and retry.
212
+ - ``"num_ooms"``: number of out-of-memory errors thrown.
213
+
214
+ The caching allocator can be configured via ENV to not split blocks larger than a
215
+ defined size (see Memory Management section of the Cuda Semantics documentation).
216
+ This helps avoid memory fragmentation but may have a performance
217
+ penalty. Additional outputs to assist with tuning and evaluating impact:
218
+
219
+ - ``"max_split_size"``: blocks above this size will not be split.
220
+ - ``"oversize_allocations.{current,peak,allocated,freed}"``:
221
+ number of over-size allocation requests received by the memory allocator.
222
+ - ``"oversize_segments.{current,peak,allocated,freed}"``:
223
+ number of over-size reserved segments from ``cudaMalloc()``.
224
+
225
+ The caching allocator can be configured via ENV to round memory allocations in order
226
+ to reduce fragmentation. Sometimes the overhead from rounding can be higher than
227
+ the fragmentation it helps reduce. The following stat can be used to check if
228
+ rounding adds too much overhead:
229
+
230
+ - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
231
+ memory requested by client code, compare this with allocated_bytes to check if
232
+ allocation rounding adds too much overhead.
233
+
234
+ Args:
235
+ device (torch.device or int, optional): selected device. Returns
236
+ statistics for the current device, given by :func:`~torch.cuda.current_device`,
237
+ if :attr:`device` is ``None`` (default).
238
+
239
+ .. note::
240
+ See :ref:`cuda-memory-management` for more details about GPU memory
241
+ management.
242
+
243
+ .. note::
244
+ With :ref:`backend:cudaMallocAsync<cuda-memory-envvars>`, some stats are not
245
+ meaningful, and are always reported as zero.
246
+ """
247
+ result = []
248
+
249
+ def _recurse_add_to_result(prefix, obj):
250
+ if isinstance(obj, dict):
251
+ if len(prefix) > 0:
252
+ prefix += "."
253
+ for k, v in obj.items():
254
+ _recurse_add_to_result(prefix + k, v)
255
+ else:
256
+ result.append((prefix, obj))
257
+
258
+ stats = memory_stats_as_nested_dict(device=device)
259
+ _recurse_add_to_result("", stats)
260
+ result.sort()
261
+
262
+ return collections.OrderedDict(result)
263
+
264
+
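The statistics dictionary described above is flat, keyed by dotted names. A small sketch of how it is typically consumed (peak/current bookkeeping around a workload; assumes a CUDA device):

```python
import torch

# Peak-memory bookkeeping around a workload.
torch.cuda.reset_peak_memory_stats()
x = torch.randn(4096, 4096, device="cuda")
y = x @ x

stats = torch.cuda.memory_stats()
print(stats["allocated_bytes.all.current"])   # bytes currently allocated
print(stats["allocated_bytes.all.peak"])      # peak since the last reset
print(stats["num_alloc_retries"], stats["num_ooms"])

# The convenience wrappers below read the same dictionary:
print(torch.cuda.max_memory_allocated(), torch.cuda.memory_reserved())
```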
265
+ def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]:
266
+ r"""Return the result of :func:`~torch.cuda.memory_stats` as a nested dictionary."""
267
+ if not is_initialized():
268
+ return {}
269
+ device = _get_device_index(device, optional=True)
270
+ return torch._C._cuda_memoryStats(device)
271
+
272
+
273
+ def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None:
274
+ r"""Reset the "accumulated" (historical) stats tracked by the CUDA memory allocator.
275
+
276
+ See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to
277
+ the `"allocated"` and `"freed"` keys in each individual stat dict, as well as
278
+ `"num_alloc_retries"` and `"num_ooms"`.
279
+
280
+ Args:
281
+ device (torch.device or int, optional): selected device. Returns
282
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
283
+ if :attr:`device` is ``None`` (default).
284
+
285
+ .. note::
286
+ See :ref:`cuda-memory-management` for more details about GPU memory
287
+ management.
288
+ """
289
+ device = _get_device_index(device, optional=True)
290
+ return torch._C._cuda_resetAccumulatedMemoryStats(device)
291
+
292
+
293
+ def reset_peak_memory_stats(device: Union[Device, int] = None) -> None:
294
+ r"""Reset the "peak" stats tracked by the CUDA memory allocator.
295
+
296
+ See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the
297
+ `"peak"` key in each individual stat dict.
298
+
299
+ Args:
300
+ device (torch.device or int, optional): selected device. Returns
301
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
302
+ if :attr:`device` is ``None`` (default).
303
+
304
+ .. note::
305
+ See :ref:`cuda-memory-management` for more details about GPU memory
306
+ management.
307
+ """
308
+ device = _get_device_index(device, optional=True)
309
+ return torch._C._cuda_resetPeakMemoryStats(device)
310
+
311
+
312
+ def reset_max_memory_allocated(device: Union[Device, int] = None) -> None:
313
+ r"""Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device.
314
+
315
+ See :func:`~torch.cuda.max_memory_allocated` for details.
316
+
317
+ Args:
318
+ device (torch.device or int, optional): selected device. Returns
319
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
320
+ if :attr:`device` is ``None`` (default).
321
+
322
+ .. warning::
323
+ This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
324
+ /all/ peak memory stats.
325
+
326
+ .. note::
327
+ See :ref:`cuda-memory-management` for more details about GPU memory
328
+ management.
329
+ """
330
+ warnings.warn(
331
+ "torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, "
332
+ "which resets /all/ peak memory stats.",
333
+ FutureWarning,
334
+ )
335
+ return reset_peak_memory_stats(device=device)
336
+
337
+
338
+ def reset_max_memory_cached(device: Union[Device, int] = None) -> None:
339
+ r"""Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.
340
+
341
+ See :func:`~torch.cuda.max_memory_cached` for details.
342
+
343
+ Args:
344
+ device (torch.device or int, optional): selected device. Returns
345
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
346
+ if :attr:`device` is ``None`` (default).
347
+
348
+ .. warning::
349
+ This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
350
+ /all/ peak memory stats.
351
+
352
+ .. note::
353
+ See :ref:`cuda-memory-management` for more details about GPU memory
354
+ management.
355
+ """
356
+ warnings.warn(
357
+ "torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, "
358
+ "which resets /all/ peak memory stats.",
359
+ FutureWarning,
360
+ )
361
+ return reset_peak_memory_stats(device=device)
362
+
363
+
364
+ def memory_allocated(device: Union[Device, int] = None) -> int:
365
+ r"""Return the current GPU memory occupied by tensors in bytes for a given device.
366
+
367
+ Args:
368
+ device (torch.device or int, optional): selected device. Returns
369
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
370
+ if :attr:`device` is ``None`` (default).
371
+
372
+ .. note::
373
+ This is likely less than the amount shown in `nvidia-smi` since some
374
+ unused memory can be held by the caching allocator and some context
375
+ needs to be created on GPU. See :ref:`cuda-memory-management` for more
376
+ details about GPU memory management.
377
+ """
378
+ return memory_stats(device=device).get("allocated_bytes.all.current", 0)
379
+
380
+
381
+ def max_memory_allocated(device: Union[Device, int] = None) -> int:
382
+ r"""Return the maximum GPU memory occupied by tensors in bytes for a given device.
383
+
384
+ By default, this returns the peak allocated memory since the beginning of
385
+ this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to
386
+ reset the starting point in tracking this metric. For example, these two
387
+ functions can measure the peak allocated memory usage of each iteration in a
388
+ training loop.
389
+
390
+ Args:
391
+ device (torch.device or int, optional): selected device. Returns
392
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
393
+ if :attr:`device` is ``None`` (default).
394
+
395
+ .. note::
396
+ See :ref:`cuda-memory-management` for more details about GPU memory
397
+ management.
398
+ """
399
+ return memory_stats(device=device).get("allocated_bytes.all.peak", 0)
400
+
401
+
402
+ def memory_reserved(device: Union[Device, int] = None) -> int:
403
+ r"""Return the current GPU memory managed by the caching allocator in bytes for a given device.
404
+
405
+ Args:
406
+ device (torch.device or int, optional): selected device. Returns
407
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
408
+ if :attr:`device` is ``None`` (default).
409
+
410
+ .. note::
411
+ See :ref:`cuda-memory-management` for more details about GPU memory
412
+ management.
413
+ """
414
+ return memory_stats(device=device).get("reserved_bytes.all.current", 0)
415
+
416
+
417
+ def max_memory_reserved(device: Union[Device, int] = None) -> int:
418
+ r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device.
419
+
420
+ By default, this returns the peak cached memory since the beginning of this
421
+ program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset
422
+ the starting point in tracking this metric. For example, these two functions
423
+ can measure the peak cached memory amount of each iteration in a training
424
+ loop.
425
+
426
+ Args:
427
+ device (torch.device or int, optional): selected device. Returns
428
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
429
+ if :attr:`device` is ``None`` (default).
430
+
431
+ .. note::
432
+ See :ref:`cuda-memory-management` for more details about GPU memory
433
+ management.
434
+ """
435
+ return memory_stats(device=device).get("reserved_bytes.all.peak", 0)
436
+
437
+
438
+ def memory_cached(device: Union[Device, int] = None) -> int:
439
+ r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
440
+ warnings.warn(
441
+ "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved",
442
+ FutureWarning,
443
+ )
444
+ return memory_reserved(device=device)
445
+
446
+
447
+ def max_memory_cached(device: Union[Device, int] = None) -> int:
448
+ r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
449
+ warnings.warn(
450
+ "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved",
451
+ FutureWarning,
452
+ )
453
+ return max_memory_reserved(device=device)
454
+
455
+
456
+ def memory_snapshot():
457
+ r"""Return a snapshot of the CUDA memory allocator state across all devices.
458
+
459
+ Interpreting the output of this function requires familiarity with the
460
+ memory allocator internals.
461
+
462
+ .. note::
463
+ See :ref:`cuda-memory-management` for more details about GPU memory
464
+ management.
465
+ """
466
+ return torch._C._cuda_memorySnapshot()["segments"]
467
+
468
+
469
+ def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str:
470
+ r"""Return a human-readable printout of the current memory allocator statistics for a given device.
471
+
472
+ This can be useful to display periodically during training, or when
473
+ handling out-of-memory exceptions.
474
+
475
+ Args:
476
+ device (torch.device or int, optional): selected device. Returns
477
+ printout for the current device, given by :func:`~torch.cuda.current_device`,
478
+ if :attr:`device` is ``None`` (default).
479
+ abbreviated (bool, optional): whether to return an abbreviated summary
480
+ (default: False).
481
+
482
+ .. note::
483
+ See :ref:`cuda-memory-management` for more details about GPU memory
484
+ management.
485
+ """
486
+ device = _get_device_index(device, optional=True)
487
+ stats = memory_stats(device=device)
488
+
489
+ def _format_size(sz, pref_sz):
490
+ prefixes = ["B ", "KiB", "MiB", "GiB", "TiB", "PiB"]
491
+ prefix = prefixes[0]
492
+ for new_prefix in prefixes[1:]:
493
+ if pref_sz < 768 * 1024:
494
+ break
495
+ prefix = new_prefix
496
+ sz //= 1024
497
+ pref_sz /= 1024
498
+ return f"{sz:6d} {prefix}"
499
+
500
+ def _format_count(cnt, pref_cnt):
501
+ prefixes = [" ", "K", "M"]
502
+ prefix = prefixes[0]
503
+ for new_prefix in prefixes[1:]:
504
+ if pref_cnt < 750 * 1000:
505
+ break
506
+ prefix = new_prefix
507
+ cnt //= 1000
508
+ pref_cnt /= 1000
509
+ return f"{cnt:7d} {prefix} "
510
+
511
+ metrics_to_display = [
512
+ ("allocated_bytes", "Allocated memory", _format_size),
513
+ ("active_bytes", "Active memory", _format_size),
514
+ ("requested_bytes", "Requested memory", _format_size),
515
+ ("reserved_bytes", "GPU reserved memory", _format_size),
516
+ ("inactive_split_bytes", "Non-releasable memory", _format_size),
517
+ ("allocation", "Allocations", _format_count),
518
+ ("active", "Active allocs", _format_count),
519
+ ("segment", "GPU reserved segments", _format_count),
520
+ ("inactive_split", "Non-releasable allocs", _format_count),
521
+ ]
522
+
523
+ lines = []
524
+ lines.append("=" * 75)
525
+ lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ")
526
+ lines.append("-" * 75)
527
+ lines.append(
528
+ " {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} "
529
+ )
530
+ lines.append("=" * 75)
531
+ lines.append(
532
+ " Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed "
533
+ )
534
+
535
+ for metric_key, metric_name, formatter in metrics_to_display:
536
+ lines.append("-" * 75)
537
+ submetrics = [("all", metric_name)]
538
+ if not abbreviated:
539
+ submetrics.append(("large_pool", " from large pool"))
540
+ submetrics.append(("small_pool", " from small pool"))
541
+
542
+ current_prefval, peak_prefval, allocated_prefval, freed_prefval = (
543
+ None,
544
+ None,
545
+ None,
546
+ None,
547
+ )
548
+
549
+ for submetric_key, submetric_name in submetrics:
550
+ prefix = metric_key + "." + submetric_key + "."
551
+
552
+ current = stats[prefix + "current"]
553
+ peak = stats[prefix + "peak"]
554
+ allocated = stats[prefix + "allocated"]
555
+ freed = stats[prefix + "freed"]
556
+
557
+ if current_prefval is None:
558
+ current_prefval = current
559
+ peak_prefval = peak
560
+ allocated_prefval = allocated
561
+ freed_prefval = freed
562
+
563
+ lines.append(
564
+ " {:<21} | {} | {} | {} | {} ".format(
565
+ submetric_name,
566
+ formatter(current, current_prefval),
567
+ formatter(peak, peak_prefval),
568
+ formatter(allocated, allocated_prefval),
569
+ formatter(freed, freed_prefval),
570
+ ),
571
+ )
572
+
573
+ metrics_to_display = [
574
+ ("oversize_allocations", "Oversize allocations", _format_count),
575
+ ("oversize_segments", "Oversize GPU segments", _format_count),
576
+ ]
577
+
578
+ for metric_key, metric_name, formatter in metrics_to_display:
579
+ lines.append("-" * 75)
580
+
581
+ prefix = metric_key + "."
582
+
583
+ current = stats[prefix + "current"]
584
+ peak = stats[prefix + "peak"]
585
+ allocated = stats[prefix + "allocated"]
586
+ freed = stats[prefix + "freed"]
587
+
588
+ lines.append(
589
+ " {:<21} | {} | {} | {} | {} ".format(
590
+ metric_name,
591
+ formatter(current, current),
592
+ formatter(peak, peak),
593
+ formatter(allocated, allocated),
594
+ formatter(freed, freed),
595
+ ),
596
+ )
597
+
598
+ lines.append("=" * 75)
599
+
600
+ fmt_dict = {"_": "", "device": device}
601
+ for k, v in stats.items():
602
+ fmt_dict[k.replace(".", "-")] = v
603
+ return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n"
604
+
605
+
606
+ def list_gpu_processes(device: Union[Device, int] = None) -> str:
607
+ r"""Return a human-readable printout of the running processes and their GPU memory use for a given device.
608
+
609
+ This can be useful to display periodically during training, or when
610
+ handling out-of-memory exceptions.
611
+
612
+ Args:
613
+ device (torch.device or int, optional): selected device. Returns
614
+ printout for the current device, given by :func:`~torch.cuda.current_device`,
615
+ if :attr:`device` is ``None`` (default).
616
+ """
617
+ try:
618
+ import pynvml # type: ignore[import]
619
+ except ModuleNotFoundError:
620
+ return "pynvml module not found, please install pynvml"
621
+ from pynvml import NVMLError_DriverNotLoaded
622
+
623
+ try:
624
+ pynvml.nvmlInit()
625
+ except NVMLError_DriverNotLoaded:
626
+ return "cuda driver can't be loaded, is cuda enabled?"
627
+ device = _get_nvml_device_index(device)
628
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
629
+ procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
630
+ lines = []
631
+ lines.append(f"GPU:{device}")
632
+ if len(procs) == 0:
633
+ lines.append("no processes are running")
634
+ for p in procs:
635
+ mem = p.usedGpuMemory / (1024 * 1024)
636
+ lines.append(f"process {p.pid:>10d} uses {mem:>12.3f} MB GPU memory")
637
+ return "\n".join(lines)
638
+
639
+
640
+ def mem_get_info(device: Union[Device, int] = None) -> Tuple[int, int]:
641
+ r"""Return the global free and total GPU memory for a given device using cudaMemGetInfo.
642
+
643
+ Args:
644
+ device (torch.device or int, optional): selected device. Returns
645
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
646
+ if :attr:`device` is ``None`` (default).
647
+
648
+ .. note::
649
+ See :ref:`cuda-memory-management` for more
650
+ details about GPU memory management.
651
+ """
652
+ if device is None:
653
+ device = torch.cuda.current_device()
654
+ device = _get_device_index(device)
655
+ return torch.cuda.cudart().cudaMemGetInfo(device)
656
+
657
+
658
+ def _record_memory_history_legacy(
659
+ enabled: bool,
660
+ record_context=True,
661
+ trace_alloc_max_entries=1,
662
+ trace_alloc_record_context=False,
663
+ device: Union[Device, int] = None,
664
+ record_context_cpp=False,
665
+ ):
666
+ _C._cuda_record_memory_history_legacy(
667
+ enabled,
668
+ record_context,
669
+ trace_alloc_max_entries,
670
+ trace_alloc_record_context,
671
+ record_context_cpp,
672
+ )
673
+
674
+
675
+ def _record_memory_history(enabled="all", *args, **kwargs):
676
+ """Enable recording of stack traces associated with memory
677
+ allocations, so you can tell what allocated any piece of memory in
678
+ :func:`torch.cuda.memory._snapshot()`.
679
+
680
+ In addition to keeping stack traces with each current allocation and free,
681
+ this will also enable recording of a history of all alloc/free events.
682
+
683
+ Use :func:`torch.cuda.memory._snapshot()` to retrieve this information,
684
+ and the tools in `_memory_viz.py` to visualize snapshots.
685
+
686
+ The Python trace collection is fast (2us per trace), so you may consider
687
+ enabling this on production jobs if you anticipate ever having to debug
688
+ memory issues.
689
+
690
+ C++ trace collection is also fast (~50ns/frame), which for many typical programs
691
+ works out to ~2us per trace, but can vary depending on stack depth.
692
+
693
+ Args:
694
+ enabled (Literal[None, "state", "all"], optional):
695
+ `None`, disable recording memory history.
696
+ `"state"`, keep information for currenly allocated memory.
697
+ `"all"`, additionally keep a history of all alloc/free calls.
698
+ Defaults to "all".
699
+ context (Literal[None, "state", "alloc", "all"], optional):
700
+ `None`, Do not record any tracebacks.
701
+ `"state"`, Record tracebacks for currently allocated memory.
702
+ `"alloc"`, additionally keep tracebacks for alloc calls.
703
+ `"all"`, additionally keep tracebacks for free calls.
704
+ Defaults to "all".
705
+ stacks (Literal["python", "all"], optional):
706
+ `"python"`, include Python, TorchScript, and inductor frames in tracebacks
707
+ `"all"`, additionally include C++ frames
708
+ Defaults to "all".
709
+ max_entries (int, optional): Keep a maximum of `max_entries`
710
+ alloc/free events in the recorded history.
711
+ """
712
+ if isinstance(enabled, bool):
713
+ return _record_memory_history_legacy(enabled, *args, **kwargs)
714
+ else:
715
+ return _record_memory_history_impl(enabled, *args, **kwargs)
716
+
717
+
718
+ def _record_memory_history_impl(
719
+ enabled: Optional[str] = "all",
720
+ context: Optional[str] = "all",
721
+ stacks: str = "all",
722
+ max_entries: int = sys.maxsize,
723
+ device: Union[Device, int] = None,
724
+ ):
725
+ _C._cuda_record_memory_history(enabled, context, stacks, max_entries)
726
+
727
+
728
+ _record_memory_history.__signature__ = signature(_record_memory_history_impl) # type: ignore[attr-defined]
729
+
730
+
731
+ def _snapshot(device: Union[Device, int] = None):
732
+ """Save a snapshot of CUDA memory state at the time it was called.
733
+
734
+ The state is represented as a dictionary with the following structure.
735
+
736
+ .. code-block:: python
737
+
738
+ class Snapshot(TypedDict):
739
+ segments : List[Segment]
740
+ device_traces: List[List[TraceEntry]]
741
+
742
+ class Segment(TypedDict):
743
+ # Segments are memory returned from a cudaMalloc call.
744
+ # The size of reserved memory is the sum of all Segments.
745
+ # Segments are cached and reused for future allocations.
746
+ # If the reuse is smaller than the segment, the segment
747
+ # is split into more than one Block.
748
+ # empty_cache() frees Segments that are entirely inactive.
749
+ address: int
750
+ total_size: int # cudaMalloc'd size of segment
751
+ stream: int
752
+ segment_type: Literal['small', 'large'] # 'large' (>1MB)
753
+ allocated_size: int # size of memory in use
754
+ active_size: int # size of memory in use or in active_awaiting_free state
755
+ blocks : List[Block]
756
+
757
+ class Block(TypedDict):
758
+ # A piece of memory returned from the allocator, or
759
+ # current cached but inactive.
760
+ size: int
761
+ requested_size: int # size requested during malloc, may be smaller than
762
+ # size due to rounding
763
+ address: int
764
+ state: Literal['active_allocated', # used by a tensor
765
+ 'active_awaiting_free', # waiting for another stream to finish using
766
+ # this, then it will become free
767
+ 'inactive',] # free for reuse
768
+ frames: List[Frame] # stack trace from where the allocation occurred
769
+
770
+ class Frame(TypedDict):
771
+ filename: str
772
+ line: int
773
+ name: str
774
+
775
+ class TraceEntry(TypedDict):
776
+ # When `torch.cuda.memory._record_memory_history()` is enabled,
777
+ # the snapshot will contain TraceEntry objects that record each
778
+ # action the allocator took.
779
+ action: Literal[
780
+ 'alloc' # memory allocated
781
+ 'free_requested', # the allocator received a call to free memory
782
+ 'free_completed', # the memory that was requested to be freed is now
783
+ # able to be used in future allocation calls
784
+ 'segment_alloc', # the caching allocator asked cudaMalloc for more memory
785
+ # and added it as a segment in its cache
786
+ 'segment_free', # the caching allocator called cudaFree to return memory
787
+ # to cuda, possibly trying to free up memory to
788
+ # allocate more segments, or because empty_cache was called
789
+ 'oom', # the allocator threw an OOM exception. 'size' is
790
+ # the requested number of bytes that did not succeed
791
+ 'snapshot' # the allocator generated a memory snapshot
792
+ # useful to correlate a previously taken
793
+ # snapshot with this trace
794
+ ]
795
+ addr: int # not present for OOM
796
+ frames: List[Frame]
797
+ size: int
798
+ stream: int
799
+ device_free: int # only present for OOM, the amount of
800
+ # memory cuda still reports to be free
801
+
802
+ Returns:
803
+ The Snapshot dictionary object
804
+ """
805
+ return _C._cuda_memorySnapshot()
806
+
807
+
808
+ def _dump_snapshot(filename="dump_snapshot.pickle"):
809
+ """
810
+ Save a pickled version of the `torch.cuda.memory._snapshot()` dictionary to a file.
811
+
812
+ This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz
813
+
814
+ Args:
815
+ filename (str, optional): Name of the file to create. Defaults to "dump_snapshot.pickle".
816
+ """
817
+ s = _snapshot()
818
+ with open(filename, "wb") as f:
819
+ pickle.dump(s, f)
820
+
821
+
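The recording, snapshot, and dump helpers above are typically used together. A hedged sketch follows; note these are private, underscore-prefixed APIs that may change between releases.

```python
import torch

# Record allocator history, run a workload, then dump a snapshot
# that can be opened at pytorch.org/memory_viz.
torch.cuda.memory._record_memory_history(max_entries=100000)

buf = [torch.randn(1024, 1024, device="cuda") for _ in range(8)]
del buf

torch.cuda.memory._dump_snapshot("my_run.pickle")        # open in the viewer
torch.cuda.memory._record_memory_history(enabled=None)   # stop recording
```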
822
+ def _save_segment_usage(filename="output.svg", snapshot=None):
823
+ if snapshot is None:
824
+ snapshot = _snapshot()
825
+ with open(filename, "w") as f:
826
+ f.write(_segments(snapshot))
827
+
828
+
829
+ def _save_memory_usage(filename="output.svg", snapshot=None):
830
+ if snapshot is None:
831
+ snapshot = _snapshot()
832
+ with open(filename, "w") as f:
833
+ f.write(_memory(snapshot))
834
+
835
+
836
+ def _set_allocator_settings(env: str):
837
+ return torch._C._cuda_cudaCachingAllocator_set_allocator_settings(env)
838
+
839
+
840
+ def get_allocator_backend() -> str:
841
+ r"""Return a string describing the active allocator backend as set by
842
+ ``PYTORCH_CUDA_ALLOC_CONF``. Currently available backends are
843
+ ``native`` (PyTorch's native caching allocator) and ``cudaMallocAsync``
844
+ (CUDA's built-in asynchronous allocator).
845
+
846
+ .. note::
847
+ See :ref:`cuda-memory-management` for details on choosing the allocator backend.
848
+ """
849
+ return torch._C._cuda_getAllocatorBackend()
850
+
851
+
852
+ class _CUDAAllocator:
853
+ r"""Wrapper over internal CUDA memory allocators."""
854
+
855
+ def __init__(self, allocator: torch._C._cuda_CUDAAllocator):
856
+ self._allocator = allocator
857
+
858
+ def allocator(self):
859
+ return self._allocator
860
+
861
+
862
+ class CUDAPluggableAllocator(_CUDAAllocator):
863
+ r"""CUDA memory allocator loaded from a so file."""
864
+
865
+ def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str):
866
+ r"""Memory allocators are compiled in .so files and loaded dynamically using ctypes.
867
+
868
+ To change the active allocator use the :func:`torch.cuda.memory.change_current_allocator` function.
869
+
870
+ Args:
871
+ path_to_so_file(str): Path in the filesystem to the `.so` file containing
872
+ the allocator functions
873
+ alloc_fn_name(str): Name of the function to perform the memory allocation
874
+ in the so file. The signature must be:
875
+ void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream);
876
+ free_fn_name(str): Name of the function to perform the memory release
877
+ in the so file. The signature must be:
878
+ void free_fn_name(void* ptr, size_t size, cudaStream_t stream);
879
+
880
+ .. warning::
881
+ This is currently supported only on UNIX-like operating systems
882
+
883
+ .. note::
884
+ See :ref:`cuda-memory-management` for details on creating and using a custom allocator
885
+ """
886
+ allocator = ctypes.CDLL(path_to_so_file)
887
+ alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value
888
+ free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value
889
+ assert alloc_fn is not None
890
+ assert free_fn is not None
891
+ self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn)
892
+
893
+
894
+ def change_current_allocator(allocator: _CUDAAllocator) -> None:
895
+ r"""Change the currently used memory allocator to be the one provided.
896
+
897
+ If the current allocator has already been used/initialized, this function will error.
898
+
899
+
900
+ Args:
901
+ allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one.
902
+ .. note::
903
+ See :ref:`cuda-memory-management` for details on creating and using a custom allocator
904
+ """
905
+ torch._C._cuda_changeCurrentAllocator(allocator.allocator())
906
+
907
+
908
+ def _get_current_allocator() -> _CUDAAllocator:
909
+ r"""Return the allocator being currently used.
910
+
911
+ .. note::
912
+ See :ref:`cuda-memory-management` for details on creating and using a custom allocator
913
+ """
914
+ return _CUDAAllocator(torch._C._cuda_getAllocator())
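As a rough illustration of the pluggable-allocator hooks defined above: the `.so` path and the exported symbol names below are placeholders, the shared library must be built separately with the documented C signatures, and the swap has to happen before the native allocator is first used.

```python
import torch

# Hypothetical shared library exposing my_alloc / my_free with the documented signatures.
new_alloc = torch.cuda.memory.CUDAPluggableAllocator(
    "alloc.so", "my_alloc", "my_free"
)
torch.cuda.memory.change_current_allocator(new_alloc)

# Subsequent CUDA allocations now go through the plugged-in allocator.
x = torch.zeros(10, device="cuda")
```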
venv/lib/python3.10/site-packages/torch/cuda/profiler.py ADDED
@@ -0,0 +1,61 @@
1
+ import contextlib
2
+ import tempfile
3
+
4
+ import torch
5
+ from . import check_error, cudart
6
+
7
+ __all__ = ["init", "start", "stop", "profile"]
8
+
9
+ DEFAULT_FLAGS = [
10
+ "gpustarttimestamp",
11
+ "gpuendtimestamp",
12
+ "gridsize3d",
13
+ "threadblocksize",
14
+ "streamid",
15
+ "enableonstart 0",
16
+ "conckerneltrace",
17
+ ]
18
+
19
+
20
+ def init(output_file, flags=None, output_mode="key_value"):
21
+ rt = cudart()
22
+ if not hasattr(rt, "cudaOutputMode"):
23
+ raise AssertionError("HIP does not support profiler initialization!")
24
+ if (
25
+ hasattr(torch.version, "cuda")
26
+ and torch.version.cuda is not None
27
+ and int(torch.version.cuda.split(".")[0]) >= 12
28
+ ):
29
+ # Check https://github.com/pytorch/pytorch/pull/91118
30
+ # cudaProfilerInitialize is no longer needed after CUDA 12
31
+ raise AssertionError("CUDA12+ does not need profiler initialization!")
32
+ flags = DEFAULT_FLAGS if flags is None else flags
33
+ if output_mode == "key_value":
34
+ output_mode_enum = rt.cudaOutputMode.KeyValuePair
35
+ elif output_mode == "csv":
36
+ output_mode_enum = rt.cudaOutputMode.CSV
37
+ else:
38
+ raise RuntimeError(
39
+ "supported CUDA profiler output modes are: key_value and csv"
40
+ )
41
+ with tempfile.NamedTemporaryFile(delete=True) as f:
42
+ f.write(b"\n".join(f.encode("ascii") for f in flags))
43
+ f.flush()
44
+ check_error(rt.cudaProfilerInitialize(f.name, output_file, output_mode_enum))
45
+
46
+
47
+ def start():
48
+ check_error(cudart().cudaProfilerStart())
49
+
50
+
51
+ def stop():
52
+ check_error(cudart().cudaProfilerStop())
53
+
54
+
55
+ @contextlib.contextmanager
56
+ def profile():
57
+ try:
58
+ start()
59
+ yield
60
+ finally:
61
+ stop()
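The module above only toggles CUDA's profiler around a region of interest; the trace itself is usually captured by an external tool such as Nsight Systems. A common pattern is sketched below (the script must be launched under the external profiler for the marked range to show up):

```python
import torch

model = torch.nn.Linear(64, 64).cuda()
x = torch.randn(32, 64, device="cuda")

# Warm up outside the profiled region, then mark the region of interest.
model(x)
torch.cuda.synchronize()

with torch.cuda.profiler.profile():            # cudaProfilerStart/Stop around the block
    with torch.autograd.profiler.emit_nvtx():  # annotate ops with NVTX ranges
        model(x)
torch.cuda.synchronize()
```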
venv/lib/python3.10/site-packages/torch/include/clog.h ADDED
@@ -0,0 +1,108 @@
1
+ /*
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include <stdarg.h>
12
+ #include <stdlib.h>
13
+ #include <inttypes.h>
14
+
15
+ #define CLOG_NONE 0
16
+ #define CLOG_FATAL 1
17
+ #define CLOG_ERROR 2
18
+ #define CLOG_WARNING 3
19
+ #define CLOG_INFO 4
20
+ #define CLOG_DEBUG 5
21
+
22
+ #ifndef CLOG_VISIBILITY
23
+ #if defined(__ELF__)
24
+ #define CLOG_VISIBILITY __attribute__((__visibility__("internal")))
25
+ #elif defined(__MACH__)
26
+ #define CLOG_VISIBILITY __attribute__((__visibility__("hidden")))
27
+ #else
28
+ #define CLOG_VISIBILITY
29
+ #endif
30
+ #endif
31
+
32
+ #ifndef CLOG_ARGUMENTS_FORMAT
33
+ #if defined(__GNUC__)
34
+ #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2)))
35
+ #else
36
+ #define CLOG_ARGUMENTS_FORMAT
37
+ #endif
38
+ #endif
39
+
40
+ #ifdef __cplusplus
41
+ extern "C" {
42
+ #endif
43
+
44
+ CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args);
45
+ CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args);
46
+ CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args);
47
+ CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args);
48
+ CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args);
49
+
50
+ #define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \
51
+ CLOG_ARGUMENTS_FORMAT \
52
+ inline static void log_debug_function_name(const char* format, ...) { \
53
+ if (level >= CLOG_DEBUG) { \
54
+ va_list args; \
55
+ va_start(args, format); \
56
+ clog_vlog_debug(module, format, args); \
57
+ va_end(args); \
58
+ } \
59
+ }
60
+
61
+ #define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \
62
+ CLOG_ARGUMENTS_FORMAT \
63
+ inline static void log_info_function_name(const char* format, ...) { \
64
+ if (level >= CLOG_INFO) { \
65
+ va_list args; \
66
+ va_start(args, format); \
67
+ clog_vlog_info(module, format, args); \
68
+ va_end(args); \
69
+ } \
70
+ }
71
+
72
+ #define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \
73
+ CLOG_ARGUMENTS_FORMAT \
74
+ inline static void log_warning_function_name(const char* format, ...) { \
75
+ if (level >= CLOG_WARNING) { \
76
+ va_list args; \
77
+ va_start(args, format); \
78
+ clog_vlog_warning(module, format, args); \
79
+ va_end(args); \
80
+ } \
81
+ }
82
+
83
+ #define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \
84
+ CLOG_ARGUMENTS_FORMAT \
85
+ inline static void log_error_function_name(const char* format, ...) { \
86
+ if (level >= CLOG_ERROR) { \
87
+ va_list args; \
88
+ va_start(args, format); \
89
+ clog_vlog_error(module, format, args); \
90
+ va_end(args); \
91
+ } \
92
+ }
93
+
94
+ #define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \
95
+ CLOG_ARGUMENTS_FORMAT \
96
+ inline static void log_fatal_function_name(const char* format, ...) { \
97
+ if (level >= CLOG_FATAL) { \
98
+ va_list args; \
99
+ va_start(args, format); \
100
+ clog_vlog_fatal(module, format, args); \
101
+ va_end(args); \
102
+ } \
103
+ abort(); \
104
+ }
105
+
106
+ #ifdef __cplusplus
107
+ } /* extern "C" */
108
+ #endif
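
The CLOG_DEFINE_LOG_* macros above expand into inline variadic wrappers around the clog_vlog_* functions, gated by a compile-time level. A minimal usage sketch follows; the module name "demo" and the function name demo_log_info are illustrative only, and it assumes clog.h is on the include path and the clog implementation (which provides clog_vlog_info) is linked in.

    #include <clog.h>

    /* Hypothetical logger for a module named "demo": the macro expands to an
     * inline function demo_log_info(const char* format, ...) that forwards to
     * clog_vlog_info() because the chosen level (CLOG_DEBUG) admits CLOG_INFO. */
    CLOG_DEFINE_LOG_INFO(demo_log_info, "demo", CLOG_DEBUG)

    int main(void) {
        demo_log_info("initialized %d worker(s)", 4);
        return 0;
    }

Raising or lowering the level argument at compile time enables or disables the call site without touching the callers, since the generated wrapper simply skips the clog_vlog_info() call when the level check fails.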
venv/lib/python3.10/site-packages/torch/include/cpuinfo.h ADDED
@@ -0,0 +1,1956 @@
1
+ #pragma once
2
+ #ifndef CPUINFO_H
3
+ #define CPUINFO_H
4
+
5
+ #ifndef __cplusplus
6
+ #include <stdbool.h>
7
+ #endif
8
+
9
+ #ifdef __APPLE__
10
+ #include <TargetConditionals.h>
11
+ #endif
12
+
13
+ #include <stdint.h>
14
+
15
+ /* Identify architecture and define corresponding macro */
16
+
17
+ #if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86)
18
+ #define CPUINFO_ARCH_X86 1
19
+ #endif
20
+
21
+ #if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
22
+ #define CPUINFO_ARCH_X86_64 1
23
+ #endif
24
+
25
+ #if defined(__arm__) || defined(_M_ARM)
26
+ #define CPUINFO_ARCH_ARM 1
27
+ #endif
28
+
29
+ #if defined(__aarch64__) || defined(_M_ARM64)
30
+ #define CPUINFO_ARCH_ARM64 1
31
+ #endif
32
+
33
+ #if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
34
+ #define CPUINFO_ARCH_PPC64 1
35
+ #endif
36
+
37
+ #if defined(__asmjs__)
38
+ #define CPUINFO_ARCH_ASMJS 1
39
+ #endif
40
+
41
+ #if defined(__wasm__)
42
+ #if defined(__wasm_simd128__)
43
+ #define CPUINFO_ARCH_WASMSIMD 1
44
+ #else
45
+ #define CPUINFO_ARCH_WASM 1
46
+ #endif
47
+ #endif
48
+
49
+ /* Define other architecture-specific macros as 0 */
50
+
51
+ #ifndef CPUINFO_ARCH_X86
52
+ #define CPUINFO_ARCH_X86 0
53
+ #endif
54
+
55
+ #ifndef CPUINFO_ARCH_X86_64
56
+ #define CPUINFO_ARCH_X86_64 0
57
+ #endif
58
+
59
+ #ifndef CPUINFO_ARCH_ARM
60
+ #define CPUINFO_ARCH_ARM 0
61
+ #endif
62
+
63
+ #ifndef CPUINFO_ARCH_ARM64
64
+ #define CPUINFO_ARCH_ARM64 0
65
+ #endif
66
+
67
+ #ifndef CPUINFO_ARCH_PPC64
68
+ #define CPUINFO_ARCH_PPC64 0
69
+ #endif
70
+
71
+ #ifndef CPUINFO_ARCH_ASMJS
72
+ #define CPUINFO_ARCH_ASMJS 0
73
+ #endif
74
+
75
+ #ifndef CPUINFO_ARCH_WASM
76
+ #define CPUINFO_ARCH_WASM 0
77
+ #endif
78
+
79
+ #ifndef CPUINFO_ARCH_WASMSIMD
80
+ #define CPUINFO_ARCH_WASMSIMD 0
81
+ #endif
82
+
83
+ #if CPUINFO_ARCH_X86 && defined(_MSC_VER)
84
+ #define CPUINFO_ABI __cdecl
85
+ #elif CPUINFO_ARCH_X86 && defined(__GNUC__)
86
+ #define CPUINFO_ABI __attribute__((__cdecl__))
87
+ #else
88
+ #define CPUINFO_ABI
89
+ #endif
90
+
91
+ #define CPUINFO_CACHE_UNIFIED 0x00000001
92
+ #define CPUINFO_CACHE_INCLUSIVE 0x00000002
93
+ #define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004
94
+
95
+ struct cpuinfo_cache {
96
+ /** Cache size in bytes */
97
+ uint32_t size;
98
+ /** Number of ways of associativity */
99
+ uint32_t associativity;
100
+ /** Number of sets */
101
+ uint32_t sets;
102
+ /** Number of partitions */
103
+ uint32_t partitions;
104
+ /** Line size in bytes */
105
+ uint32_t line_size;
106
+ /**
107
+ * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing).
108
+ *
109
+ * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING
110
+ */
111
+ uint32_t flags;
112
+ /** Index of the first logical processor that shares this cache */
113
+ uint32_t processor_start;
114
+ /** Number of logical processors that share this cache */
115
+ uint32_t processor_count;
116
+ };
117
+
118
+ struct cpuinfo_trace_cache {
119
+ uint32_t uops;
120
+ uint32_t associativity;
121
+ };
122
+
123
+ #define CPUINFO_PAGE_SIZE_4KB 0x1000
124
+ #define CPUINFO_PAGE_SIZE_1MB 0x100000
125
+ #define CPUINFO_PAGE_SIZE_2MB 0x200000
126
+ #define CPUINFO_PAGE_SIZE_4MB 0x400000
127
+ #define CPUINFO_PAGE_SIZE_16MB 0x1000000
128
+ #define CPUINFO_PAGE_SIZE_1GB 0x40000000
129
+
130
+ struct cpuinfo_tlb {
131
+ uint32_t entries;
132
+ uint32_t associativity;
133
+ uint64_t pages;
134
+ };
135
+
136
+ /** Vendor of processor core design */
137
+ enum cpuinfo_vendor {
138
+ /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */
139
+ cpuinfo_vendor_unknown = 0,
140
+
141
+ /* Active vendors of modern CPUs */
142
+
143
+ /**
144
+ * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures.
145
+ *
146
+ * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004.
147
+ */
148
+ cpuinfo_vendor_intel = 1,
149
+ /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */
150
+ cpuinfo_vendor_amd = 2,
151
+ /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */
152
+ cpuinfo_vendor_arm = 3,
153
+ /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */
154
+ cpuinfo_vendor_qualcomm = 4,
155
+ /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */
156
+ cpuinfo_vendor_apple = 5,
157
+ /** Samsung Electronics Co., Ltd. Vendor of ARM64 processor microarchitectures. */
158
+ cpuinfo_vendor_samsung = 6,
159
+ /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */
160
+ cpuinfo_vendor_nvidia = 7,
161
+ /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */
162
+ cpuinfo_vendor_mips = 8,
163
+ /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */
164
+ cpuinfo_vendor_ibm = 9,
165
+ /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */
166
+ cpuinfo_vendor_ingenic = 10,
167
+ /**
168
+ * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures.
169
+ *
170
+ * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies.
171
+ */
172
+ cpuinfo_vendor_via = 11,
173
+ /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */
174
+ cpuinfo_vendor_cavium = 12,
175
+ /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */
176
+ cpuinfo_vendor_broadcom = 13,
177
+ /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */
178
+ cpuinfo_vendor_apm = 14,
179
+ /**
180
+ * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures.
181
+ *
182
+ * Processors are designed by HiSilicon, a subsidiary of Huawei.
183
+ */
184
+ cpuinfo_vendor_huawei = 15,
185
+ /**
186
+ * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd), Vendor of x86-64 processor microarchitectures.
187
+ *
188
+ * Processors are variants of AMD cores.
189
+ */
190
+ cpuinfo_vendor_hygon = 16,
191
+
192
+ /* Active vendors of embedded CPUs */
193
+
194
+ /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */
195
+ cpuinfo_vendor_texas_instruments = 30,
196
+ /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */
197
+ cpuinfo_vendor_marvell = 31,
198
+ /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */
199
+ cpuinfo_vendor_rdc = 32,
200
+ /** DM&P Electronics Inc. Vendor of x86 processor microarchitectures. */
201
+ cpuinfo_vendor_dmp = 33,
202
+ /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */
203
+ cpuinfo_vendor_motorola = 34,
204
+
205
+ /* Defunct CPU vendors */
206
+
207
+ /**
208
+ * Transmeta Corporation. Vendor of x86 processor microarchitectures.
209
+ *
210
+ * Now defunct. The last processor design was released in 2004.
211
+ * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code.
212
+ */
213
+ cpuinfo_vendor_transmeta = 50,
214
+ /**
215
+ * Cyrix Corporation. Vendor of x86 processor microarchitectures.
216
+ *
217
+ * Now defunct. The last processor design was released in 1996.
218
+ */
219
+ cpuinfo_vendor_cyrix = 51,
220
+ /**
221
+ * Rise Technology. Vendor of x86 processor microarchitectures.
222
+ *
223
+ * Now defunct. The last processor design was released in 1999.
224
+ */
225
+ cpuinfo_vendor_rise = 52,
226
+ /**
227
+ * National Semiconductor. Vendor of x86 processor microarchitectures.
228
+ *
229
+ * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998.
230
+ */
231
+ cpuinfo_vendor_nsc = 53,
232
+ /**
233
+ * Silicon Integrated Systems. Vendor of x86 processor microarchitectures.
234
+ *
235
+ * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001.
236
+ */
237
+ cpuinfo_vendor_sis = 54,
238
+ /**
239
+ * NexGen. Vendor of x86 processor microarchitectures.
240
+ *
241
+ * Now defunct. The last processor design was released in 1994.
242
+ * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations.
243
+ */
244
+ cpuinfo_vendor_nexgen = 55,
245
+ /**
246
+ * United Microelectronics Corporation. Vendor of x86 processor microarchitectures.
247
+ *
248
+ * Ceased x86 in the early 1990s. The last processor design was released in 1991.
249
+ * Designed U5C and U5D processors. Both are 486 level.
250
+ */
251
+ cpuinfo_vendor_umc = 56,
252
+ /**
253
+ * Digital Equipment Corporation. Vendor of ARM processor microarchitecture.
254
+ *
255
+ * Sold its ARM designs in 1997. The last processor design was released in 1997.
256
+ */
257
+ cpuinfo_vendor_dec = 57,
258
+ };
259
+
260
+ /**
261
+ * Processor microarchitecture
262
+ *
263
+ * Processors with different microarchitectures often have different instruction performance characteristics,
264
+ * and may have dramatically different pipeline organization.
265
+ */
266
+ enum cpuinfo_uarch {
267
+ /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from the OS. */
268
+ cpuinfo_uarch_unknown = 0,
269
+
270
+ /** Pentium and Pentium MMX microarchitecture. */
271
+ cpuinfo_uarch_p5 = 0x00100100,
272
+ /** Intel Quark microarchitecture. */
273
+ cpuinfo_uarch_quark = 0x00100101,
274
+
275
+ /** Pentium Pro, Pentium II, and Pentium III. */
276
+ cpuinfo_uarch_p6 = 0x00100200,
277
+ /** Pentium M. */
278
+ cpuinfo_uarch_dothan = 0x00100201,
279
+ /** Intel Core microarchitecture. */
280
+ cpuinfo_uarch_yonah = 0x00100202,
281
+ /** Intel Core 2 microarchitecture on 65 nm process. */
282
+ cpuinfo_uarch_conroe = 0x00100203,
283
+ /** Intel Core 2 microarchitecture on 45 nm process. */
284
+ cpuinfo_uarch_penryn = 0x00100204,
285
+ /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */
286
+ cpuinfo_uarch_nehalem = 0x00100205,
287
+ /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */
288
+ cpuinfo_uarch_sandy_bridge = 0x00100206,
289
+ /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */
290
+ cpuinfo_uarch_ivy_bridge = 0x00100207,
291
+ /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */
292
+ cpuinfo_uarch_haswell = 0x00100208,
293
+ /** Intel Broadwell microarchitecture. */
294
+ cpuinfo_uarch_broadwell = 0x00100209,
295
+ /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */
296
+ cpuinfo_uarch_sky_lake = 0x0010020A,
297
+ /** DEPRECATED (Intel Kaby Lake microarchitecture). */
298
+ cpuinfo_uarch_kaby_lake = 0x0010020A,
299
+ /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */
300
+ cpuinfo_uarch_palm_cove = 0x0010020B,
301
+ /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */
302
+ cpuinfo_uarch_sunny_cove = 0x0010020C,
303
+
304
+ /** Pentium 4 with Willamette, Northwood, or Foster cores. */
305
+ cpuinfo_uarch_willamette = 0x00100300,
306
+ /** Pentium 4 with Prescott and later cores. */
307
+ cpuinfo_uarch_prescott = 0x00100301,
308
+
309
+ /** Intel Atom on 45 nm process. */
310
+ cpuinfo_uarch_bonnell = 0x00100400,
311
+ /** Intel Atom on 32 nm process. */
312
+ cpuinfo_uarch_saltwell = 0x00100401,
313
+ /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */
314
+ cpuinfo_uarch_silvermont = 0x00100402,
315
+ /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */
316
+ cpuinfo_uarch_airmont = 0x00100403,
317
+ /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */
318
+ cpuinfo_uarch_goldmont = 0x00100404,
319
+ /** Intel Goldmont Plus microarchitecture (Gemini Lake). */
320
+ cpuinfo_uarch_goldmont_plus = 0x00100405,
321
+
322
+ /** Intel Knights Ferry HPC boards. */
323
+ cpuinfo_uarch_knights_ferry = 0x00100500,
324
+ /** Intel Knights Corner HPC boards (aka Xeon Phi). */
325
+ cpuinfo_uarch_knights_corner = 0x00100501,
326
+ /** Intel Knights Landing microarchitecture (second-gen MIC). */
327
+ cpuinfo_uarch_knights_landing = 0x00100502,
328
+ /** Intel Knights Hill microarchitecture (third-gen MIC). */
329
+ cpuinfo_uarch_knights_hill = 0x00100503,
330
+ /** Intel Knights Mill Xeon Phi. */
331
+ cpuinfo_uarch_knights_mill = 0x00100504,
332
+
333
+ /** Intel/Marvell XScale series. */
334
+ cpuinfo_uarch_xscale = 0x00100600,
335
+
336
+ /** AMD K5. */
337
+ cpuinfo_uarch_k5 = 0x00200100,
338
+ /** AMD K6 and alike. */
339
+ cpuinfo_uarch_k6 = 0x00200101,
340
+ /** AMD Athlon and Duron. */
341
+ cpuinfo_uarch_k7 = 0x00200102,
342
+ /** AMD Athlon 64, Opteron 64. */
343
+ cpuinfo_uarch_k8 = 0x00200103,
344
+ /** AMD Family 10h (Barcelona, Istanbul, Magny-Cours). */
345
+ cpuinfo_uarch_k10 = 0x00200104,
346
+ /**
347
+ * AMD Bulldozer microarchitecture
348
+ * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs.
349
+ */
350
+ cpuinfo_uarch_bulldozer = 0x00200105,
351
+ /**
352
+ * AMD Piledriver microarchitecture
353
+ * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs.
354
+ */
355
+ cpuinfo_uarch_piledriver = 0x00200106,
356
+ /** AMD Steamroller microarchitecture (Kaveri APUs). */
357
+ cpuinfo_uarch_steamroller = 0x00200107,
358
+ /** AMD Excavator microarchitecture (Carrizo APUs). */
359
+ cpuinfo_uarch_excavator = 0x00200108,
360
+ /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */
361
+ cpuinfo_uarch_zen = 0x00200109,
362
+ /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */
363
+ cpuinfo_uarch_zen2 = 0x0020010A,
364
+ /** AMD Zen 3 microarchitecture. */
365
+ cpuinfo_uarch_zen3 = 0x0020010B,
366
+ /** AMD Zen 4 microarchitecture. */
367
+ cpuinfo_uarch_zen4 = 0x0020010C,
368
+
369
+ /** NSC Geode and AMD Geode GX and LX. */
370
+ cpuinfo_uarch_geode = 0x00200200,
371
+ /** AMD Bobcat mobile microarchitecture. */
372
+ cpuinfo_uarch_bobcat = 0x00200201,
373
+ /** AMD Jaguar mobile microarchitecture. */
374
+ cpuinfo_uarch_jaguar = 0x00200202,
375
+ /** AMD Puma mobile microarchitecture. */
376
+ cpuinfo_uarch_puma = 0x00200203,
377
+
378
+ /** ARM7 series. */
379
+ cpuinfo_uarch_arm7 = 0x00300100,
380
+ /** ARM9 series. */
381
+ cpuinfo_uarch_arm9 = 0x00300101,
382
+ /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */
383
+ cpuinfo_uarch_arm11 = 0x00300102,
384
+
385
+ /** ARM Cortex-A5. */
386
+ cpuinfo_uarch_cortex_a5 = 0x00300205,
387
+ /** ARM Cortex-A7. */
388
+ cpuinfo_uarch_cortex_a7 = 0x00300207,
389
+ /** ARM Cortex-A8. */
390
+ cpuinfo_uarch_cortex_a8 = 0x00300208,
391
+ /** ARM Cortex-A9. */
392
+ cpuinfo_uarch_cortex_a9 = 0x00300209,
393
+ /** ARM Cortex-A12. */
394
+ cpuinfo_uarch_cortex_a12 = 0x00300212,
395
+ /** ARM Cortex-A15. */
396
+ cpuinfo_uarch_cortex_a15 = 0x00300215,
397
+ /** ARM Cortex-A17. */
398
+ cpuinfo_uarch_cortex_a17 = 0x00300217,
399
+
400
+ /** ARM Cortex-A32. */
401
+ cpuinfo_uarch_cortex_a32 = 0x00300332,
402
+ /** ARM Cortex-A35. */
403
+ cpuinfo_uarch_cortex_a35 = 0x00300335,
404
+ /** ARM Cortex-A53. */
405
+ cpuinfo_uarch_cortex_a53 = 0x00300353,
406
+ /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */
407
+ cpuinfo_uarch_cortex_a55r0 = 0x00300354,
408
+ /** ARM Cortex-A55. */
409
+ cpuinfo_uarch_cortex_a55 = 0x00300355,
410
+ /** ARM Cortex-A57. */
411
+ cpuinfo_uarch_cortex_a57 = 0x00300357,
412
+ /** ARM Cortex-A65. */
413
+ cpuinfo_uarch_cortex_a65 = 0x00300365,
414
+ /** ARM Cortex-A72. */
415
+ cpuinfo_uarch_cortex_a72 = 0x00300372,
416
+ /** ARM Cortex-A73. */
417
+ cpuinfo_uarch_cortex_a73 = 0x00300373,
418
+ /** ARM Cortex-A75. */
419
+ cpuinfo_uarch_cortex_a75 = 0x00300375,
420
+ /** ARM Cortex-A76. */
421
+ cpuinfo_uarch_cortex_a76 = 0x00300376,
422
+ /** ARM Cortex-A77. */
423
+ cpuinfo_uarch_cortex_a77 = 0x00300377,
424
+ /** ARM Cortex-A78. */
425
+ cpuinfo_uarch_cortex_a78 = 0x00300378,
426
+
427
+ /** ARM Neoverse N1. */
428
+ cpuinfo_uarch_neoverse_n1 = 0x00300400,
429
+ /** ARM Neoverse E1. */
430
+ cpuinfo_uarch_neoverse_e1 = 0x00300401,
431
+ /** ARM Neoverse V1. */
432
+ cpuinfo_uarch_neoverse_v1 = 0x00300402,
433
+ /** ARM Neoverse N2. */
434
+ cpuinfo_uarch_neoverse_n2 = 0x00300403,
435
+ /** ARM Neoverse V2. */
436
+ cpuinfo_uarch_neoverse_v2 = 0x00300404,
437
+
438
+ /** ARM Cortex-X1. */
439
+ cpuinfo_uarch_cortex_x1 = 0x00300501,
440
+ /** ARM Cortex-X2. */
441
+ cpuinfo_uarch_cortex_x2 = 0x00300502,
442
+ /** ARM Cortex-X3. */
443
+ cpuinfo_uarch_cortex_x3 = 0x00300503,
444
+
445
+ /** ARM Cortex-A510. */
446
+ cpuinfo_uarch_cortex_a510 = 0x00300551,
447
+ /** ARM Cortex-A710. */
448
+ cpuinfo_uarch_cortex_a710 = 0x00300571,
449
+ /** ARM Cortex-A715. */
450
+ cpuinfo_uarch_cortex_a715 = 0x00300572,
451
+
452
+ /** Qualcomm Scorpion. */
453
+ cpuinfo_uarch_scorpion = 0x00400100,
454
+ /** Qualcomm Krait. */
455
+ cpuinfo_uarch_krait = 0x00400101,
456
+ /** Qualcomm Kryo. */
457
+ cpuinfo_uarch_kryo = 0x00400102,
458
+ /** Qualcomm Falkor. */
459
+ cpuinfo_uarch_falkor = 0x00400103,
460
+ /** Qualcomm Saphira. */
461
+ cpuinfo_uarch_saphira = 0x00400104,
462
+
463
+ /** Nvidia Denver. */
464
+ cpuinfo_uarch_denver = 0x00500100,
465
+ /** Nvidia Denver 2. */
466
+ cpuinfo_uarch_denver2 = 0x00500101,
467
+ /** Nvidia Carmel. */
468
+ cpuinfo_uarch_carmel = 0x00500102,
469
+
470
+ /** Samsung Exynos M1 (Exynos 8890 big cores). */
471
+ cpuinfo_uarch_exynos_m1 = 0x00600100,
472
+ /** Samsung Exynos M2 (Exynos 8895 big cores). */
473
+ cpuinfo_uarch_exynos_m2 = 0x00600101,
474
+ /** Samsung Exynos M3 (Exynos 9810 big cores). */
475
+ cpuinfo_uarch_exynos_m3 = 0x00600102,
476
+ /** Samsung Exynos M4 (Exynos 9820 big cores). */
477
+ cpuinfo_uarch_exynos_m4 = 0x00600103,
478
+ /** Samsung Exynos M5 (Exynos 9830 big cores). */
479
+ cpuinfo_uarch_exynos_m5 = 0x00600104,
480
+
481
+ /* Deprecated synonym for Cortex-A76 */
482
+ cpuinfo_uarch_cortex_a76ae = 0x00300376,
483
+ /* Deprecated names for Exynos. */
484
+ cpuinfo_uarch_mongoose_m1 = 0x00600100,
485
+ cpuinfo_uarch_mongoose_m2 = 0x00600101,
486
+ cpuinfo_uarch_meerkat_m3 = 0x00600102,
487
+ cpuinfo_uarch_meerkat_m4 = 0x00600103,
488
+
489
+ /** Apple A6 and A6X processors. */
490
+ cpuinfo_uarch_swift = 0x00700100,
491
+ /** Apple A7 processor. */
492
+ cpuinfo_uarch_cyclone = 0x00700101,
493
+ /** Apple A8 and A8X processor. */
494
+ cpuinfo_uarch_typhoon = 0x00700102,
495
+ /** Apple A9 and A9X processor. */
496
+ cpuinfo_uarch_twister = 0x00700103,
497
+ /** Apple A10 and A10X processor. */
498
+ cpuinfo_uarch_hurricane = 0x00700104,
499
+ /** Apple A11 processor (big cores). */
500
+ cpuinfo_uarch_monsoon = 0x00700105,
501
+ /** Apple A11 processor (little cores). */
502
+ cpuinfo_uarch_mistral = 0x00700106,
503
+ /** Apple A12 processor (big cores). */
504
+ cpuinfo_uarch_vortex = 0x00700107,
505
+ /** Apple A12 processor (little cores). */
506
+ cpuinfo_uarch_tempest = 0x00700108,
507
+ /** Apple A13 processor (big cores). */
508
+ cpuinfo_uarch_lightning = 0x00700109,
509
+ /** Apple A13 processor (little cores). */
510
+ cpuinfo_uarch_thunder = 0x0070010A,
511
+ /** Apple A14 / M1 processor (big cores). */
512
+ cpuinfo_uarch_firestorm = 0x0070010B,
513
+ /** Apple A14 / M1 processor (little cores). */
514
+ cpuinfo_uarch_icestorm = 0x0070010C,
515
+ /** Apple A15 / M2 processor (big cores). */
516
+ cpuinfo_uarch_avalanche = 0x0070010D,
517
+ /** Apple A15 / M2 processor (little cores). */
518
+ cpuinfo_uarch_blizzard = 0x0070010E,
519
+
520
+ /** Cavium ThunderX. */
521
+ cpuinfo_uarch_thunderx = 0x00800100,
522
+ /** Cavium ThunderX2 (originally Broadcom Vulcan). */
523
+ cpuinfo_uarch_thunderx2 = 0x00800200,
524
+
525
+ /** Marvell PJ4. */
526
+ cpuinfo_uarch_pj4 = 0x00900100,
527
+
528
+ /** Broadcom Brahma B15. */
529
+ cpuinfo_uarch_brahma_b15 = 0x00A00100,
530
+ /** Broadcom Brahma B53. */
531
+ cpuinfo_uarch_brahma_b53 = 0x00A00101,
532
+
533
+ /** Applied Micro X-Gene. */
534
+ cpuinfo_uarch_xgene = 0x00B00100,
535
+
536
+ /* Hygon Dhyana (a modification of AMD Zen for Chinese market). */
537
+ cpuinfo_uarch_dhyana = 0x01000100,
538
+
539
+ /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */
540
+ cpuinfo_uarch_taishan_v110 = 0x00C00100,
541
+ };
542
+
543
+ struct cpuinfo_processor {
544
+ /** SMT (hyperthread) ID within a core */
545
+ uint32_t smt_id;
546
+ /** Core containing this logical processor */
547
+ const struct cpuinfo_core* core;
548
+ /** Cluster of cores containing this logical processor */
549
+ const struct cpuinfo_cluster* cluster;
550
+ /** Physical package containing this logical processor */
551
+ const struct cpuinfo_package* package;
552
+ #if defined(__linux__)
553
+ /**
554
+ * Linux-specific ID for the logical processor:
555
+ * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu<linux_id>/
556
+ * - Bit <linux_id> in the cpu_set_t identifies this logical processor
557
+ */
558
+ int linux_id;
559
+ #endif
560
+ #if defined(_WIN32) || defined(__CYGWIN__)
561
+ /** Windows-specific ID for the group containing the logical processor. */
562
+ uint16_t windows_group_id;
563
+ /**
564
+ * Windows-specific ID of the logical processor within its group:
565
+ * - Bit <windows_processor_id> in the KAFFINITY mask identifies this logical processor within its group.
566
+ */
567
+ uint16_t windows_processor_id;
568
+ #endif
569
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
570
+ /** APIC ID (unique x86-specific ID of the logical processor) */
571
+ uint32_t apic_id;
572
+ #endif
573
+ struct {
574
+ /** Level 1 instruction cache */
575
+ const struct cpuinfo_cache* l1i;
576
+ /** Level 1 data cache */
577
+ const struct cpuinfo_cache* l1d;
578
+ /** Level 2 unified or data cache */
579
+ const struct cpuinfo_cache* l2;
580
+ /** Level 3 unified or data cache */
581
+ const struct cpuinfo_cache* l3;
582
+ /** Level 4 unified or data cache */
583
+ const struct cpuinfo_cache* l4;
584
+ } cache;
585
+ };
586
+
587
+ struct cpuinfo_core {
588
+ /** Index of the first logical processor on this core. */
589
+ uint32_t processor_start;
590
+ /** Number of logical processors on this core */
591
+ uint32_t processor_count;
592
+ /** Core ID within a package */
593
+ uint32_t core_id;
594
+ /** Cluster containing this core */
595
+ const struct cpuinfo_cluster* cluster;
596
+ /** Physical package containing this core. */
597
+ const struct cpuinfo_package* package;
598
+ /** Vendor of the CPU microarchitecture for this core */
599
+ enum cpuinfo_vendor vendor;
600
+ /** CPU microarchitecture for this core */
601
+ enum cpuinfo_uarch uarch;
602
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
603
+ /** Value of CPUID leaf 1 EAX register for this core */
604
+ uint32_t cpuid;
605
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
606
+ /** Value of Main ID Register (MIDR) for this core */
607
+ uint32_t midr;
608
+ #endif
609
+ /** Clock rate (non-Turbo) of the core, in Hz */
610
+ uint64_t frequency;
611
+ };
612
+
613
+ struct cpuinfo_cluster {
614
+ /** Index of the first logical processor in the cluster */
615
+ uint32_t processor_start;
616
+ /** Number of logical processors in the cluster */
617
+ uint32_t processor_count;
618
+ /** Index of the first core in the cluster */
619
+ uint32_t core_start;
620
+ /** Number of cores on the cluster */
621
+ uint32_t core_count;
622
+ /** Cluster ID within a package */
623
+ uint32_t cluster_id;
624
+ /** Physical package containing the cluster */
625
+ const struct cpuinfo_package* package;
626
+ /** CPU microarchitecture vendor of the cores in the cluster */
627
+ enum cpuinfo_vendor vendor;
628
+ /** CPU microarchitecture of the cores in the cluster */
629
+ enum cpuinfo_uarch uarch;
630
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
631
+ /** Value of CPUID leaf 1 EAX register of the cores in the cluster */
632
+ uint32_t cpuid;
633
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
634
+ /** Value of Main ID Register (MIDR) of the cores in the cluster */
635
+ uint32_t midr;
636
+ #endif
637
+ /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
638
+ uint64_t frequency;
639
+ };
640
+
641
+ #define CPUINFO_PACKAGE_NAME_MAX 48
642
+
643
+ struct cpuinfo_package {
644
+ /** SoC or processor chip model name */
645
+ char name[CPUINFO_PACKAGE_NAME_MAX];
646
+ /** Index of the first logical processor on this physical package */
647
+ uint32_t processor_start;
648
+ /** Number of logical processors on this physical package */
649
+ uint32_t processor_count;
650
+ /** Index of the first core on this physical package */
651
+ uint32_t core_start;
652
+ /** Number of cores on this physical package */
653
+ uint32_t core_count;
654
+ /** Index of the first cluster of cores on this physical package */
655
+ uint32_t cluster_start;
656
+ /** Number of clusters of cores on this physical package */
657
+ uint32_t cluster_count;
658
+ };
659
+
660
+ struct cpuinfo_uarch_info {
661
+ /** Type of CPU microarchitecture */
662
+ enum cpuinfo_uarch uarch;
663
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
664
+ /** Value of CPUID leaf 1 EAX register for the microarchitecture */
665
+ uint32_t cpuid;
666
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
667
+ /** Value of Main ID Register (MIDR) for the microarchitecture */
668
+ uint32_t midr;
669
+ #endif
670
+ /** Number of logical processors with the microarchitecture */
671
+ uint32_t processor_count;
672
+ /** Number of cores with the microarchitecture */
673
+ uint32_t core_count;
674
+ };
675
+
676
+ #ifdef __cplusplus
677
+ extern "C" {
678
+ #endif
679
+
680
+ bool CPUINFO_ABI cpuinfo_initialize(void);
681
+
682
+ void CPUINFO_ABI cpuinfo_deinitialize(void);
683
+
684
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
685
+ /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. */
686
+ struct cpuinfo_x86_isa {
687
+ #if CPUINFO_ARCH_X86
688
+ bool rdtsc;
689
+ #endif
690
+ bool rdtscp;
691
+ bool rdpid;
692
+ bool sysenter;
693
+ #if CPUINFO_ARCH_X86
694
+ bool syscall;
695
+ #endif
696
+ bool msr;
697
+ bool clzero;
698
+ bool clflush;
699
+ bool clflushopt;
700
+ bool mwait;
701
+ bool mwaitx;
702
+ #if CPUINFO_ARCH_X86
703
+ bool emmx;
704
+ #endif
705
+ bool fxsave;
706
+ bool xsave;
707
+ #if CPUINFO_ARCH_X86
708
+ bool fpu;
709
+ bool mmx;
710
+ bool mmx_plus;
711
+ #endif
712
+ bool three_d_now;
713
+ bool three_d_now_plus;
714
+ #if CPUINFO_ARCH_X86
715
+ bool three_d_now_geode;
716
+ #endif
717
+ bool prefetch;
718
+ bool prefetchw;
719
+ bool prefetchwt1;
720
+ #if CPUINFO_ARCH_X86
721
+ bool daz;
722
+ bool sse;
723
+ bool sse2;
724
+ #endif
725
+ bool sse3;
726
+ bool ssse3;
727
+ bool sse4_1;
728
+ bool sse4_2;
729
+ bool sse4a;
730
+ bool misaligned_sse;
731
+ bool avx;
732
+ bool avxvnni;
733
+ bool fma3;
734
+ bool fma4;
735
+ bool xop;
736
+ bool f16c;
737
+ bool avx2;
738
+ bool avx512f;
739
+ bool avx512pf;
740
+ bool avx512er;
741
+ bool avx512cd;
742
+ bool avx512dq;
743
+ bool avx512bw;
744
+ bool avx512vl;
745
+ bool avx512ifma;
746
+ bool avx512vbmi;
747
+ bool avx512vbmi2;
748
+ bool avx512bitalg;
749
+ bool avx512vpopcntdq;
750
+ bool avx512vnni;
751
+ bool avx512bf16;
752
+ bool avx512fp16;
753
+ bool avx512vp2intersect;
754
+ bool avx512_4vnniw;
755
+ bool avx512_4fmaps;
756
+ bool hle;
757
+ bool rtm;
758
+ bool xtest;
759
+ bool mpx;
760
+ #if CPUINFO_ARCH_X86
761
+ bool cmov;
762
+ bool cmpxchg8b;
763
+ #endif
764
+ bool cmpxchg16b;
765
+ bool clwb;
766
+ bool movbe;
767
+ #if CPUINFO_ARCH_X86_64
768
+ bool lahf_sahf;
769
+ #endif
770
+ bool fs_gs_base;
771
+ bool lzcnt;
772
+ bool popcnt;
773
+ bool tbm;
774
+ bool bmi;
775
+ bool bmi2;
776
+ bool adx;
777
+ bool aes;
778
+ bool vaes;
779
+ bool pclmulqdq;
780
+ bool vpclmulqdq;
781
+ bool gfni;
782
+ bool rdrand;
783
+ bool rdseed;
784
+ bool sha;
785
+ bool rng;
786
+ bool ace;
787
+ bool ace2;
788
+ bool phe;
789
+ bool pmm;
790
+ bool lwp;
791
+ };
792
+
793
+ extern struct cpuinfo_x86_isa cpuinfo_isa;
794
+ #endif
795
+
796
+ static inline bool cpuinfo_has_x86_rdtsc(void) {
797
+ #if CPUINFO_ARCH_X86_64
798
+ return true;
799
+ #elif CPUINFO_ARCH_X86
800
+ #if defined(__ANDROID__)
801
+ return true;
802
+ #else
803
+ return cpuinfo_isa.rdtsc;
804
+ #endif
805
+ #else
806
+ return false;
807
+ #endif
808
+ }
809
+
810
+ static inline bool cpuinfo_has_x86_rdtscp(void) {
811
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
812
+ return cpuinfo_isa.rdtscp;
813
+ #else
814
+ return false;
815
+ #endif
816
+ }
817
+
818
+ static inline bool cpuinfo_has_x86_rdpid(void) {
819
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
820
+ return cpuinfo_isa.rdpid;
821
+ #else
822
+ return false;
823
+ #endif
824
+ }
825
+
826
+ static inline bool cpuinfo_has_x86_clzero(void) {
827
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
828
+ return cpuinfo_isa.clzero;
829
+ #else
830
+ return false;
831
+ #endif
832
+ }
833
+
834
+ static inline bool cpuinfo_has_x86_mwait(void) {
835
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
836
+ return cpuinfo_isa.mwait;
837
+ #else
838
+ return false;
839
+ #endif
840
+ }
841
+
842
+ static inline bool cpuinfo_has_x86_mwaitx(void) {
843
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
844
+ return cpuinfo_isa.mwaitx;
845
+ #else
846
+ return false;
847
+ #endif
848
+ }
849
+
850
+ static inline bool cpuinfo_has_x86_fxsave(void) {
851
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
852
+ return cpuinfo_isa.fxsave;
853
+ #else
854
+ return false;
855
+ #endif
856
+ }
857
+
858
+ static inline bool cpuinfo_has_x86_xsave(void) {
859
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
860
+ return cpuinfo_isa.xsave;
861
+ #else
862
+ return false;
863
+ #endif
864
+ }
865
+
866
+ static inline bool cpuinfo_has_x86_fpu(void) {
867
+ #if CPUINFO_ARCH_X86_64
868
+ return true;
869
+ #elif CPUINFO_ARCH_X86
870
+ #if defined(__ANDROID__)
871
+ return true;
872
+ #else
873
+ return cpuinfo_isa.fpu;
874
+ #endif
875
+ #else
876
+ return false;
877
+ #endif
878
+ }
879
+
880
+ static inline bool cpuinfo_has_x86_mmx(void) {
881
+ #if CPUINFO_ARCH_X86_64
882
+ return true;
883
+ #elif CPUINFO_ARCH_X86
884
+ #if defined(__ANDROID__)
885
+ return true;
886
+ #else
887
+ return cpuinfo_isa.mmx;
888
+ #endif
889
+ #else
890
+ return false;
891
+ #endif
892
+ }
893
+
894
+ static inline bool cpuinfo_has_x86_mmx_plus(void) {
895
+ #if CPUINFO_ARCH_X86_64
896
+ return true;
897
+ #elif CPUINFO_ARCH_X86
898
+ #if defined(__ANDROID__)
899
+ return true;
900
+ #else
901
+ return cpuinfo_isa.mmx_plus;
902
+ #endif
903
+ #else
904
+ return false;
905
+ #endif
906
+ }
907
+
908
+ static inline bool cpuinfo_has_x86_3dnow(void) {
909
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
910
+ return cpuinfo_isa.three_d_now;
911
+ #else
912
+ return false;
913
+ #endif
914
+ }
915
+
916
+ static inline bool cpuinfo_has_x86_3dnow_plus(void) {
917
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
918
+ return cpuinfo_isa.three_d_now_plus;
919
+ #else
920
+ return false;
921
+ #endif
922
+ }
923
+
924
+ static inline bool cpuinfo_has_x86_3dnow_geode(void) {
925
+ #if CPUINFO_ARCH_X86_64
926
+ return false;
927
+ #elif CPUINFO_ARCH_X86
928
+ #if defined(__ANDROID__)
929
+ return false;
930
+ #else
931
+ return cpuinfo_isa.three_d_now_geode;
932
+ #endif
933
+ #else
934
+ return false;
935
+ #endif
936
+ }
937
+
938
+ static inline bool cpuinfo_has_x86_prefetch(void) {
939
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
940
+ return cpuinfo_isa.prefetch;
941
+ #else
942
+ return false;
943
+ #endif
944
+ }
945
+
946
+ static inline bool cpuinfo_has_x86_prefetchw(void) {
947
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
948
+ return cpuinfo_isa.prefetchw;
949
+ #else
950
+ return false;
951
+ #endif
952
+ }
953
+
954
+ static inline bool cpuinfo_has_x86_prefetchwt1(void) {
955
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
956
+ return cpuinfo_isa.prefetchwt1;
957
+ #else
958
+ return false;
959
+ #endif
960
+ }
961
+
962
+ static inline bool cpuinfo_has_x86_daz(void) {
963
+ #if CPUINFO_ARCH_X86_64
964
+ return true;
965
+ #elif CPUINFO_ARCH_X86
966
+ #if defined(__ANDROID__)
967
+ return true;
968
+ #else
969
+ return cpuinfo_isa.daz;
970
+ #endif
971
+ #else
972
+ return false;
973
+ #endif
974
+ }
975
+
976
+ static inline bool cpuinfo_has_x86_sse(void) {
977
+ #if CPUINFO_ARCH_X86_64
978
+ return true;
979
+ #elif CPUINFO_ARCH_X86
980
+ #if defined(__ANDROID__)
981
+ return true;
982
+ #else
983
+ return cpuinfo_isa.sse;
984
+ #endif
985
+ #else
986
+ return false;
987
+ #endif
988
+ }
989
+
990
+ static inline bool cpuinfo_has_x86_sse2(void) {
991
+ #if CPUINFO_ARCH_X86_64
992
+ return true;
993
+ #elif CPUINFO_ARCH_X86
994
+ #if defined(__ANDROID__)
995
+ return true;
996
+ #else
997
+ return cpuinfo_isa.sse2;
998
+ #endif
999
+ #else
1000
+ return false;
1001
+ #endif
1002
+ }
1003
+
1004
+ static inline bool cpuinfo_has_x86_sse3(void) {
1005
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1006
+ #if defined(__ANDROID__)
1007
+ return true;
1008
+ #else
1009
+ return cpuinfo_isa.sse3;
1010
+ #endif
1011
+ #else
1012
+ return false;
1013
+ #endif
1014
+ }
1015
+
1016
+ static inline bool cpuinfo_has_x86_ssse3(void) {
1017
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1018
+ #if defined(__ANDROID__)
1019
+ return true;
1020
+ #else
1021
+ return cpuinfo_isa.ssse3;
1022
+ #endif
1023
+ #else
1024
+ return false;
1025
+ #endif
1026
+ }
1027
+
1028
+ static inline bool cpuinfo_has_x86_sse4_1(void) {
1029
+ #if CPUINFO_ARCH_X86_64
1030
+ #if defined(__ANDROID__)
1031
+ return true;
1032
+ #else
1033
+ return cpuinfo_isa.sse4_1;
1034
+ #endif
1035
+ #elif CPUINFO_ARCH_X86
1036
+ return cpuinfo_isa.sse4_1;
1037
+ #else
1038
+ return false;
1039
+ #endif
1040
+ }
1041
+
1042
+ static inline bool cpuinfo_has_x86_sse4_2(void) {
1043
+ #if CPUINFO_ARCH_X86_64
1044
+ #if defined(__ANDROID__)
1045
+ return true;
1046
+ #else
1047
+ return cpuinfo_isa.sse4_2;
1048
+ #endif
1049
+ #elif CPUINFO_ARCH_X86
1050
+ return cpuinfo_isa.sse4_2;
1051
+ #else
1052
+ return false;
1053
+ #endif
1054
+ }
1055
+
1056
+ static inline bool cpuinfo_has_x86_sse4a(void) {
1057
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1058
+ return cpuinfo_isa.sse4a;
1059
+ #else
1060
+ return false;
1061
+ #endif
1062
+ }
1063
+
1064
+ static inline bool cpuinfo_has_x86_misaligned_sse(void) {
1065
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1066
+ return cpuinfo_isa.misaligned_sse;
1067
+ #else
1068
+ return false;
1069
+ #endif
1070
+ }
1071
+
1072
+ static inline bool cpuinfo_has_x86_avx(void) {
1073
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1074
+ return cpuinfo_isa.avx;
1075
+ #else
1076
+ return false;
1077
+ #endif
1078
+ }
1079
+
1080
+ static inline bool cpuinfo_has_x86_avxvnni(void) {
1081
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1082
+ return cpuinfo_isa.avxvnni;
1083
+ #else
1084
+ return false;
1085
+ #endif
1086
+ }
1087
+
1088
+ static inline bool cpuinfo_has_x86_fma3(void) {
1089
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1090
+ return cpuinfo_isa.fma3;
1091
+ #else
1092
+ return false;
1093
+ #endif
1094
+ }
1095
+
1096
+ static inline bool cpuinfo_has_x86_fma4(void) {
1097
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1098
+ return cpuinfo_isa.fma4;
1099
+ #else
1100
+ return false;
1101
+ #endif
1102
+ }
1103
+
1104
+ static inline bool cpuinfo_has_x86_xop(void) {
1105
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1106
+ return cpuinfo_isa.xop;
1107
+ #else
1108
+ return false;
1109
+ #endif
1110
+ }
1111
+
1112
+ static inline bool cpuinfo_has_x86_f16c(void) {
1113
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1114
+ return cpuinfo_isa.f16c;
1115
+ #else
1116
+ return false;
1117
+ #endif
1118
+ }
1119
+
1120
+ static inline bool cpuinfo_has_x86_avx2(void) {
1121
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1122
+ return cpuinfo_isa.avx2;
1123
+ #else
1124
+ return false;
1125
+ #endif
1126
+ }
1127
+
1128
+ static inline bool cpuinfo_has_x86_avx512f(void) {
1129
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1130
+ return cpuinfo_isa.avx512f;
1131
+ #else
1132
+ return false;
1133
+ #endif
1134
+ }
1135
+
1136
+ static inline bool cpuinfo_has_x86_avx512pf(void) {
1137
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1138
+ return cpuinfo_isa.avx512pf;
1139
+ #else
1140
+ return false;
1141
+ #endif
1142
+ }
1143
+
1144
+ static inline bool cpuinfo_has_x86_avx512er(void) {
1145
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1146
+ return cpuinfo_isa.avx512er;
1147
+ #else
1148
+ return false;
1149
+ #endif
1150
+ }
1151
+
1152
+ static inline bool cpuinfo_has_x86_avx512cd(void) {
1153
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1154
+ return cpuinfo_isa.avx512cd;
1155
+ #else
1156
+ return false;
1157
+ #endif
1158
+ }
1159
+
1160
+ static inline bool cpuinfo_has_x86_avx512dq(void) {
1161
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1162
+ return cpuinfo_isa.avx512dq;
1163
+ #else
1164
+ return false;
1165
+ #endif
1166
+ }
1167
+
1168
+ static inline bool cpuinfo_has_x86_avx512bw(void) {
1169
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1170
+ return cpuinfo_isa.avx512bw;
1171
+ #else
1172
+ return false;
1173
+ #endif
1174
+ }
1175
+
1176
+ static inline bool cpuinfo_has_x86_avx512vl(void) {
1177
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1178
+ return cpuinfo_isa.avx512vl;
1179
+ #else
1180
+ return false;
1181
+ #endif
1182
+ }
1183
+
1184
+ static inline bool cpuinfo_has_x86_avx512ifma(void) {
1185
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1186
+ return cpuinfo_isa.avx512ifma;
1187
+ #else
1188
+ return false;
1189
+ #endif
1190
+ }
1191
+
1192
+ static inline bool cpuinfo_has_x86_avx512vbmi(void) {
1193
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1194
+ return cpuinfo_isa.avx512vbmi;
1195
+ #else
1196
+ return false;
1197
+ #endif
1198
+ }
1199
+
1200
+ static inline bool cpuinfo_has_x86_avx512vbmi2(void) {
1201
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1202
+ return cpuinfo_isa.avx512vbmi2;
1203
+ #else
1204
+ return false;
1205
+ #endif
1206
+ }
1207
+
1208
+ static inline bool cpuinfo_has_x86_avx512bitalg(void) {
1209
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1210
+ return cpuinfo_isa.avx512bitalg;
1211
+ #else
1212
+ return false;
1213
+ #endif
1214
+ }
1215
+
1216
+ static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) {
1217
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1218
+ return cpuinfo_isa.avx512vpopcntdq;
1219
+ #else
1220
+ return false;
1221
+ #endif
1222
+ }
1223
+
1224
+ static inline bool cpuinfo_has_x86_avx512vnni(void) {
1225
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1226
+ return cpuinfo_isa.avx512vnni;
1227
+ #else
1228
+ return false;
1229
+ #endif
1230
+ }
1231
+
1232
+ static inline bool cpuinfo_has_x86_avx512bf16(void) {
1233
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1234
+ return cpuinfo_isa.avx512bf16;
1235
+ #else
1236
+ return false;
1237
+ #endif
1238
+ }
1239
+
1240
+ static inline bool cpuinfo_has_x86_avx512fp16(void) {
1241
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1242
+ return cpuinfo_isa.avx512fp16;
1243
+ #else
1244
+ return false;
1245
+ #endif
1246
+ }
1247
+
1248
+ static inline bool cpuinfo_has_x86_avx512vp2intersect(void) {
1249
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1250
+ return cpuinfo_isa.avx512vp2intersect;
1251
+ #else
1252
+ return false;
1253
+ #endif
1254
+ }
1255
+
1256
+ static inline bool cpuinfo_has_x86_avx512_4vnniw(void) {
1257
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1258
+ return cpuinfo_isa.avx512_4vnniw;
1259
+ #else
1260
+ return false;
1261
+ #endif
1262
+ }
1263
+
1264
+ static inline bool cpuinfo_has_x86_avx512_4fmaps(void) {
1265
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1266
+ return cpuinfo_isa.avx512_4fmaps;
1267
+ #else
1268
+ return false;
1269
+ #endif
1270
+ }
1271
+
1272
+ static inline bool cpuinfo_has_x86_hle(void) {
1273
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1274
+ return cpuinfo_isa.hle;
1275
+ #else
1276
+ return false;
1277
+ #endif
1278
+ }
1279
+
1280
+ static inline bool cpuinfo_has_x86_rtm(void) {
1281
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1282
+ return cpuinfo_isa.rtm;
1283
+ #else
1284
+ return false;
1285
+ #endif
1286
+ }
1287
+
1288
+ static inline bool cpuinfo_has_x86_xtest(void) {
1289
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1290
+ return cpuinfo_isa.xtest;
1291
+ #else
1292
+ return false;
1293
+ #endif
1294
+ }
1295
+
1296
+ static inline bool cpuinfo_has_x86_mpx(void) {
1297
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1298
+ return cpuinfo_isa.mpx;
1299
+ #else
1300
+ return false;
1301
+ #endif
1302
+ }
1303
+
1304
+ static inline bool cpuinfo_has_x86_cmov(void) {
1305
+ #if CPUINFO_ARCH_X86_64
1306
+ return true;
1307
+ #elif CPUINFO_ARCH_X86
1308
+ return cpuinfo_isa.cmov;
1309
+ #else
1310
+ return false;
1311
+ #endif
1312
+ }
1313
+
1314
+ static inline bool cpuinfo_has_x86_cmpxchg8b(void) {
1315
+ #if CPUINFO_ARCH_X86_64
1316
+ return true;
1317
+ #elif CPUINFO_ARCH_X86
1318
+ return cpuinfo_isa.cmpxchg8b;
1319
+ #else
1320
+ return false;
1321
+ #endif
1322
+ }
1323
+
1324
+ static inline bool cpuinfo_has_x86_cmpxchg16b(void) {
1325
+ #if CPUINFO_ARCH_X86_64
1326
+ return cpuinfo_isa.cmpxchg16b;
1327
+ #else
1328
+ return false;
1329
+ #endif
1330
+ }
1331
+
1332
+ static inline bool cpuinfo_has_x86_clwb(void) {
1333
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1334
+ return cpuinfo_isa.clwb;
1335
+ #else
1336
+ return false;
1337
+ #endif
1338
+ }
1339
+
1340
+ static inline bool cpuinfo_has_x86_movbe(void) {
1341
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1342
+ return cpuinfo_isa.movbe;
1343
+ #else
1344
+ return false;
1345
+ #endif
1346
+ }
1347
+
1348
+ static inline bool cpuinfo_has_x86_lahf_sahf(void) {
1349
+ #if CPUINFO_ARCH_X86
1350
+ return true;
1351
+ #elif CPUINFO_ARCH_X86_64
1352
+ return cpuinfo_isa.lahf_sahf;
1353
+ #else
1354
+ return false;
1355
+ #endif
1356
+ }
1357
+
1358
+ static inline bool cpuinfo_has_x86_lzcnt(void) {
1359
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1360
+ return cpuinfo_isa.lzcnt;
1361
+ #else
1362
+ return false;
1363
+ #endif
1364
+ }
1365
+
1366
+ static inline bool cpuinfo_has_x86_popcnt(void) {
1367
+ #if CPUINFO_ARCH_X86_64
1368
+ #if defined(__ANDROID__)
1369
+ return true;
1370
+ #else
1371
+ return cpuinfo_isa.popcnt;
1372
+ #endif
1373
+ #elif CPUINFO_ARCH_X86
1374
+ return cpuinfo_isa.popcnt;
1375
+ #else
1376
+ return false;
1377
+ #endif
1378
+ }
1379
+
1380
+ static inline bool cpuinfo_has_x86_tbm(void) {
1381
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1382
+ return cpuinfo_isa.tbm;
1383
+ #else
1384
+ return false;
1385
+ #endif
1386
+ }
1387
+
1388
+ static inline bool cpuinfo_has_x86_bmi(void) {
1389
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1390
+ return cpuinfo_isa.bmi;
1391
+ #else
1392
+ return false;
1393
+ #endif
1394
+ }
1395
+
1396
+ static inline bool cpuinfo_has_x86_bmi2(void) {
1397
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1398
+ return cpuinfo_isa.bmi2;
1399
+ #else
1400
+ return false;
1401
+ #endif
1402
+ }
1403
+
1404
+ static inline bool cpuinfo_has_x86_adx(void) {
1405
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1406
+ return cpuinfo_isa.adx;
1407
+ #else
1408
+ return false;
1409
+ #endif
1410
+ }
1411
+
1412
+ static inline bool cpuinfo_has_x86_aes(void) {
1413
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1414
+ return cpuinfo_isa.aes;
1415
+ #else
1416
+ return false;
1417
+ #endif
1418
+ }
1419
+
1420
+ static inline bool cpuinfo_has_x86_vaes(void) {
1421
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1422
+ return cpuinfo_isa.vaes;
1423
+ #else
1424
+ return false;
1425
+ #endif
1426
+ }
1427
+
1428
+ static inline bool cpuinfo_has_x86_pclmulqdq(void) {
1429
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1430
+ return cpuinfo_isa.pclmulqdq;
1431
+ #else
1432
+ return false;
1433
+ #endif
1434
+ }
1435
+
1436
+ static inline bool cpuinfo_has_x86_vpclmulqdq(void) {
1437
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1438
+ return cpuinfo_isa.vpclmulqdq;
1439
+ #else
1440
+ return false;
1441
+ #endif
1442
+ }
1443
+
1444
+ static inline bool cpuinfo_has_x86_gfni(void) {
1445
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1446
+ return cpuinfo_isa.gfni;
1447
+ #else
1448
+ return false;
1449
+ #endif
1450
+ }
1451
+
1452
+ static inline bool cpuinfo_has_x86_rdrand(void) {
1453
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1454
+ return cpuinfo_isa.rdrand;
1455
+ #else
1456
+ return false;
1457
+ #endif
1458
+ }
1459
+
1460
+ static inline bool cpuinfo_has_x86_rdseed(void) {
1461
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1462
+ return cpuinfo_isa.rdseed;
1463
+ #else
1464
+ return false;
1465
+ #endif
1466
+ }
1467
+
1468
+ static inline bool cpuinfo_has_x86_sha(void) {
1469
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1470
+ return cpuinfo_isa.sha;
1471
+ #else
1472
+ return false;
1473
+ #endif
1474
+ }
1475
+
1476
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1477
+ /* This structure is not a part of stable API. Use cpuinfo_has_arm_* functions instead. */
1478
+ struct cpuinfo_arm_isa {
1479
+ #if CPUINFO_ARCH_ARM
1480
+ bool thumb;
1481
+ bool thumb2;
1482
+ bool thumbee;
1483
+ bool jazelle;
1484
+ bool armv5e;
1485
+ bool armv6;
1486
+ bool armv6k;
1487
+ bool armv7;
1488
+ bool armv7mp;
1489
+ bool armv8;
1490
+ bool idiv;
1491
+
1492
+ bool vfpv2;
1493
+ bool vfpv3;
1494
+ bool d32;
1495
+ bool fp16;
1496
+ bool fma;
1497
+
1498
+ bool wmmx;
1499
+ bool wmmx2;
1500
+ bool neon;
1501
+ #endif
1502
+ #if CPUINFO_ARCH_ARM64
1503
+ bool atomics;
1504
+ bool bf16;
1505
+ bool sve;
1506
+ bool sve2;
1507
+ bool i8mm;
1508
+ #endif
1509
+ bool rdm;
1510
+ bool fp16arith;
1511
+ bool dot;
1512
+ bool jscvt;
1513
+ bool fcma;
1514
+ bool fhm;
1515
+
1516
+ bool aes;
1517
+ bool sha1;
1518
+ bool sha2;
1519
+ bool pmull;
1520
+ bool crc32;
1521
+ };
1522
+
1523
+ extern struct cpuinfo_arm_isa cpuinfo_isa;
1524
+ #endif
1525
+
1526
+ static inline bool cpuinfo_has_arm_thumb(void) {
1527
+ #if CPUINFO_ARCH_ARM
1528
+ return cpuinfo_isa.thumb;
1529
+ #else
1530
+ return false;
1531
+ #endif
1532
+ }
1533
+
1534
+ static inline bool cpuinfo_has_arm_thumb2(void) {
1535
+ #if CPUINFO_ARCH_ARM
1536
+ return cpuinfo_isa.thumb2;
1537
+ #else
1538
+ return false;
1539
+ #endif
1540
+ }
1541
+
1542
+ static inline bool cpuinfo_has_arm_v5e(void) {
1543
+ #if CPUINFO_ARCH_ARM
1544
+ return cpuinfo_isa.armv5e;
1545
+ #else
1546
+ return false;
1547
+ #endif
1548
+ }
1549
+
1550
+ static inline bool cpuinfo_has_arm_v6(void) {
1551
+ #if CPUINFO_ARCH_ARM
1552
+ return cpuinfo_isa.armv6;
1553
+ #else
1554
+ return false;
1555
+ #endif
1556
+ }
1557
+
1558
+ static inline bool cpuinfo_has_arm_v6k(void) {
1559
+ #if CPUINFO_ARCH_ARM
1560
+ return cpuinfo_isa.armv6k;
1561
+ #else
1562
+ return false;
1563
+ #endif
1564
+ }
1565
+
1566
+ static inline bool cpuinfo_has_arm_v7(void) {
1567
+ #if CPUINFO_ARCH_ARM
1568
+ return cpuinfo_isa.armv7;
1569
+ #else
1570
+ return false;
1571
+ #endif
1572
+ }
1573
+
1574
+ static inline bool cpuinfo_has_arm_v7mp(void) {
1575
+ #if CPUINFO_ARCH_ARM
1576
+ return cpuinfo_isa.armv7mp;
1577
+ #else
1578
+ return false;
1579
+ #endif
1580
+ }
1581
+
1582
+ static inline bool cpuinfo_has_arm_v8(void) {
1583
+ #if CPUINFO_ARCH_ARM64
1584
+ return true;
1585
+ #elif CPUINFO_ARCH_ARM
1586
+ return cpuinfo_isa.armv8;
1587
+ #else
1588
+ return false;
1589
+ #endif
1590
+ }
1591
+
1592
+ static inline bool cpuinfo_has_arm_idiv(void) {
1593
+ #if CPUINFO_ARCH_ARM64
1594
+ return true;
1595
+ #elif CPUINFO_ARCH_ARM
1596
+ return cpuinfo_isa.idiv;
1597
+ #else
1598
+ return false;
1599
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv2(void) {
+ #if CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv2;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv3(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv3;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv3_d32(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv3_fp16(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv4(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_vfpv4_d32(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_fp16_arith(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.fp16arith;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_bf16(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.bf16;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_wmmx(void) {
+ #if CPUINFO_ARCH_ARM
+ return cpuinfo_isa.wmmx;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_wmmx2(void) {
+ #if CPUINFO_ARCH_ARM
+ return cpuinfo_isa.wmmx2;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.neon;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_fp16(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.neon && cpuinfo_isa.fp16;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_fma(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.neon && cpuinfo_isa.fma;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_v8(void) {
+ #if CPUINFO_ARCH_ARM64
+ return true;
+ #elif CPUINFO_ARCH_ARM
+ return cpuinfo_isa.neon && cpuinfo_isa.armv8;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_atomics(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.atomics;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_rdm(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.rdm;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_fp16_arith(void) {
+ #if CPUINFO_ARCH_ARM
+ return cpuinfo_isa.neon && cpuinfo_isa.fp16arith;
+ #elif CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.fp16arith;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_fhm(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.fhm;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_dot(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.dot;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_neon_bf16(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.bf16;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_jscvt(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.jscvt;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_fcma(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.fcma;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_i8mm(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.i8mm;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_aes(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.aes;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_sha1(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.sha1;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_sha2(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.sha2;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_pmull(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.pmull;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_crc32(void) {
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.crc32;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_sve(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.sve;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_sve_bf16(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.sve && cpuinfo_isa.bf16;
+ #else
+ return false;
+ #endif
+ }
+
+ static inline bool cpuinfo_has_arm_sve2(void) {
+ #if CPUINFO_ARCH_ARM64
+ return cpuinfo_isa.sve2;
+ #else
+ return false;
+ #endif
+ }
+
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
+ const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
+ const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);
+
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index);
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
+ const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
+ const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index);
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);
+
+ uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
+ uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
+
+ /**
+ * Returns upper bound on cache size.
+ */
+ uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void);
+
+ /**
+ * Identify the logical processor that executes the current thread.
+ *
+ * There is no guarantee that the thread will stay on the same logical processor for any time.
+ * Callers should treat the result as only a hint, and be prepared to handle NULL return value.
+ */
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void);
+
+ /**
+ * Identify the core that executes the current thread.
+ *
+ * There is no guarantee that the thread will stay on the same core for any time.
+ * Callers should treat the result as only a hint, and be prepared to handle NULL return value.
+ */
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);
+
+ /**
+ * Identify the microarchitecture index of the core that executes the current thread.
+ * If the system does not support such identification, the function returns 0.
+ *
+ * There is no guarantee that the thread will stay on the same type of core for any time.
+ * Callers should treat the result as only a hint.
+ */
+ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void);
+
+ /**
+ * Identify the microarchitecture index of the core that executes the current thread.
+ * If the system does not support such identification, the function returns the user-specified default value.
+ *
+ * There is no guarantee that the thread will stay on the same type of core for any time.
+ * Callers should treat the result as only a hint.
+ */
+ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index);
+
+ #ifdef __cplusplus
+ } /* extern "C" */
+ #endif
+
+ #endif /* CPUINFO_H */
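A minimal usage sketch of the query API above. It assumes the cpuinfo_initialize()/cpuinfo_deinitialize() entry points declared earlier in this header; the features checked and the printed strings are illustrative only.

#include <stdio.h>
#include <cpuinfo.h>

int main(void) {
  /* The library must be initialized before any query (assumed declared earlier in this header). */
  if (!cpuinfo_initialize()) {
    fprintf(stderr, "failed to initialize cpuinfo\n");
    return 1;
  }
  printf("logical processors: %u, cores: %u\n",
         (unsigned) cpuinfo_get_processors_count(),
         (unsigned) cpuinfo_get_cores_count());
  printf("upper bound on cache size: %u bytes\n",
         (unsigned) cpuinfo_get_max_cache_size());
  /* cpuinfo_has_arm_neon() folds to a constant on AArch64; cpuinfo_has_arm_neon_dot() reads the detected flag. */
  if (cpuinfo_has_arm_neon() && cpuinfo_has_arm_neon_dot()) {
    printf("NEON dot-product kernels are usable\n");
  }
  /* Hint only: the thread may migrate to a core of a different type at any moment. */
  printf("current uarch index: %u\n",
         (unsigned) cpuinfo_get_current_uarch_index_with_default(0));
  cpuinfo_deinitialize();
  return 0;
}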
venv/lib/python3.10/site-packages/torch/include/dnnl.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_H
+ #define DNNL_H
+
+ #include "oneapi/dnnl/dnnl.h"
+
+ #endif /* DNNL_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_config.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_CONFIG_H
+ #define DNNL_CONFIG_H
+
+ #include "oneapi/dnnl/dnnl_config.h"
+
+ #endif /* DNNL_CONFIG_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_debug.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_DEBUG_H
+ #define DNNL_DEBUG_H
+
+ #include "oneapi/dnnl/dnnl_debug.h"
+
+ #endif /* DNNL_DEBUG_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_ocl.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_OCL_H
+ #define DNNL_OCL_H
+
+ #include "oneapi/dnnl/dnnl_ocl.h"
+
+ #endif /* DNNL_OCL_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_sycl.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_SYCL_H
+ #define DNNL_SYCL_H
+
+ #include "oneapi/dnnl/dnnl_sycl.h"
+
+ #endif /* DNNL_SYCL_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_SYCL_TYPES_H
+ #define DNNL_SYCL_TYPES_H
+
+ #include "oneapi/dnnl/dnnl_sycl_types.h"
+
+ #endif /* DNNL_SYCL_TYPES_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_THREADPOOL_H
+ #define DNNL_THREADPOOL_H
+
+ #include "oneapi/dnnl/dnnl_threadpool.h"
+
+ #endif /* DNNL_THREADPOOL_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_types.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_TYPES_H
+ #define DNNL_TYPES_H
+
+ #include "oneapi/dnnl/dnnl_types.h"
+
+ #endif /* DNNL_TYPES_H */
venv/lib/python3.10/site-packages/torch/include/dnnl_version.h ADDED
@@ -0,0 +1,22 @@
+ /*******************************************************************************
+ * Copyright 2020 Intel Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *******************************************************************************/
+
+ #ifndef DNNL_VERSION_H
+ #define DNNL_VERSION_H
+
+ #include "oneapi/dnnl/dnnl_version.h"
+
+ #endif /* DNNL_VERSION_H */
venv/lib/python3.10/site-packages/torch/include/experiments-config.h ADDED
@@ -0,0 +1,25 @@
+ // Copyright 2023 Google LLC
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #pragma once
+
+ #include <stdbool.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct xnn_experiment_config {
+ bool adaptive_avx_optimization;
+ };
+
+ struct xnn_experiment_config* xnn_get_experiment_config();
+
+ void xnn_experiment_enable_adaptive_avx_optimization();
+
+
+ #ifdef __cplusplus
+ } // extern "C"
+ #endif
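A small, hypothetical sketch of the experiment toggle declared above; the include path and the printed report are assumptions for illustration, not part of this header.

#include <stdio.h>
#include "experiments-config.h"

int main(void) {
  /* Enable the experimental flag, then read it back through the accessor. */
  xnn_experiment_enable_adaptive_avx_optimization();
  const struct xnn_experiment_config* config = xnn_get_experiment_config();
  printf("adaptive_avx_optimization: %s\n",
         config->adaptive_avx_optimization ? "enabled" : "disabled");
  return 0;
}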
venv/lib/python3.10/site-packages/torch/include/fp16.h ADDED
@@ -0,0 +1,11 @@
+ #pragma once
+ #ifndef FP16_H
+ #define FP16_H
+
+ #include <fp16/fp16.h>
+
+ #if defined(PSIMD_H)
+ #include <fp16/psimd.h>
+ #endif
+
+ #endif /* FP16_H */
venv/lib/python3.10/site-packages/torch/include/fxdiv.h ADDED
@@ -0,0 +1,425 @@
1
+ #pragma once
2
+ #ifndef FXDIV_H
3
+ #define FXDIV_H
4
+
5
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
6
+ #include <cstddef>
7
+ #include <cstdint>
8
+ #include <climits>
9
+ #elif !defined(__OPENCL_VERSION__)
10
+ #include <stddef.h>
11
+ #include <stdint.h>
12
+ #include <limits.h>
13
+ #endif
14
+
15
+ #if defined(_MSC_VER)
16
+ #include <intrin.h>
17
+ #if defined(_M_IX86) || defined(_M_X64)
18
+ #include <immintrin.h>
19
+ #endif
20
+ #endif
21
+
22
+ #ifndef FXDIV_USE_INLINE_ASSEMBLY
23
+ #define FXDIV_USE_INLINE_ASSEMBLY 0
24
+ #endif
25
+
26
+ static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) {
27
+ #if defined(_MSC_VER) && defined(_M_IX86)
28
+ return (uint64_t) __emulu((unsigned int) a, (unsigned int) b);
29
+ #else
30
+ return (uint64_t) a * (uint64_t) b;
31
+ #endif
32
+ }
33
+
34
+ static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) {
35
+ #if defined(__OPENCL_VERSION__)
36
+ return mul_hi(a, b);
37
+ #elif defined(__CUDA_ARCH__)
38
+ return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b);
39
+ #elif defined(_MSC_VER) && defined(_M_IX86)
40
+ return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32);
41
+ #elif defined(_MSC_VER) && defined(_M_ARM)
42
+ return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b);
43
+ #else
44
+ return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32);
45
+ #endif
46
+ }
47
+
48
+ static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) {
49
+ #if defined(__OPENCL_VERSION__)
50
+ return mul_hi(a, b);
51
+ #elif defined(__CUDA_ARCH__)
52
+ return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b);
53
+ #elif defined(_MSC_VER) && defined(_M_X64)
54
+ return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b);
55
+ #elif defined(__GNUC__) && defined(__SIZEOF_INT128__)
56
+ return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64);
57
+ #else
58
+ const uint32_t a_lo = (uint32_t) a;
59
+ const uint32_t a_hi = (uint32_t) (a >> 32);
60
+ const uint32_t b_lo = (uint32_t) b;
61
+ const uint32_t b_hi = (uint32_t) (b >> 32);
62
+
63
+ const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) +
64
+ (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo);
65
+ return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) +
66
+ ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32);
67
+ #endif
68
+ }
69
+
70
+ static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) {
71
+ #if SIZE_MAX == UINT32_MAX
72
+ return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b);
73
+ #elif SIZE_MAX == UINT64_MAX
74
+ return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b);
75
+ #else
76
+ #error Unsupported platform
77
+ #endif
78
+ }
79
+
80
+ struct fxdiv_divisor_uint32_t {
81
+ uint32_t value;
82
+ uint32_t m;
83
+ uint8_t s1;
84
+ uint8_t s2;
85
+ };
86
+
87
+ struct fxdiv_result_uint32_t {
88
+ uint32_t quotient;
89
+ uint32_t remainder;
90
+ };
91
+
92
+ struct fxdiv_divisor_uint64_t {
93
+ uint64_t value;
94
+ uint64_t m;
95
+ uint8_t s1;
96
+ uint8_t s2;
97
+ };
98
+
99
+ struct fxdiv_result_uint64_t {
100
+ uint64_t quotient;
101
+ uint64_t remainder;
102
+ };
103
+
104
+ struct fxdiv_divisor_size_t {
105
+ size_t value;
106
+ size_t m;
107
+ uint8_t s1;
108
+ uint8_t s2;
109
+ };
110
+
111
+ struct fxdiv_result_size_t {
112
+ size_t quotient;
113
+ size_t remainder;
114
+ };
115
+
116
+ static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) {
117
+ struct fxdiv_divisor_uint32_t result = { d };
118
+ if (d == 1) {
119
+ result.m = UINT32_C(1);
120
+ result.s1 = 0;
121
+ result.s2 = 0;
122
+ } else {
123
+ #if defined(__OPENCL_VERSION__)
124
+ const uint32_t l_minus_1 = 31 - clz(d - 1);
125
+ #elif defined(__CUDA_ARCH__)
126
+ const uint32_t l_minus_1 = 31 - __clz((int) (d - 1));
127
+ #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64))
128
+ unsigned long l_minus_1;
129
+ _BitScanReverse(&l_minus_1, (unsigned long) (d - 1));
130
+ #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY
131
+ uint32_t l_minus_1;
132
+ __asm__("BSRL %[d_minus_1], %[l_minus_1]"
133
+ : [l_minus_1] "=r" (l_minus_1)
134
+ : [d_minus_1] "r" (d - 1)
135
+ : "cc");
136
+ #elif defined(__GNUC__)
137
+ const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1);
138
+ #else
139
+ /* Based on Algorithm 2 from Hacker's delight */
140
+
141
+ uint32_t l_minus_1 = 0;
142
+ uint32_t x = d - 1;
143
+ uint32_t y = x >> 16;
144
+ if (y != 0) {
145
+ l_minus_1 += 16;
146
+ x = y;
147
+ }
148
+ y = x >> 8;
149
+ if (y != 0) {
150
+ l_minus_1 += 8;
151
+ x = y;
152
+ }
153
+ y = x >> 4;
154
+ if (y != 0) {
155
+ l_minus_1 += 4;
156
+ x = y;
157
+ }
158
+ y = x >> 2;
159
+ if (y != 0) {
160
+ l_minus_1 += 2;
161
+ x = y;
162
+ }
163
+ if ((x & 2) != 0) {
164
+ l_minus_1 += 1;
165
+ }
166
+ #endif
167
+ uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d;
168
+
169
+ /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */
170
+ #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY
171
+ uint32_t q;
172
+ __asm__("DIVL %[d]"
173
+ : "=a" (q), "+d" (u_hi)
174
+ : [d] "r" (d), "a" (0)
175
+ : "cc");
176
+ #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64))
177
+ unsigned int remainder;
178
+ const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder);
179
+ #else
180
+ const uint32_t q = ((uint64_t) u_hi << 32) / d;
181
+ #endif
182
+
183
+ result.m = q + UINT32_C(1);
184
+ result.s1 = 1;
185
+ result.s2 = (uint8_t) l_minus_1;
186
+ }
187
+ return result;
188
+ }
189
+
190
+ static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) {
191
+ struct fxdiv_divisor_uint64_t result = { d };
192
+ if (d == 1) {
193
+ result.m = UINT64_C(1);
194
+ result.s1 = 0;
195
+ result.s2 = 0;
196
+ } else {
197
+ #if defined(__OPENCL_VERSION__)
198
+ const uint32_t nlz_d = clz(d);
199
+ const uint32_t l_minus_1 = 63 - clz(d - 1);
200
+ #elif defined(__CUDA_ARCH__)
201
+ const uint32_t nlz_d = __clzll((long long) d);
202
+ const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1));
203
+ #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
204
+ unsigned long l_minus_1;
205
+ _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1));
206
+ unsigned long bsr_d;
207
+ _BitScanReverse64(&bsr_d, (unsigned __int64) d);
208
+ const uint32_t nlz_d = bsr_d ^ 0x3F;
209
+ #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM))
210
+ const uint64_t d_minus_1 = d - 1;
211
+ const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0;
212
+ unsigned long l_minus_1;
213
+ if ((uint32_t) (d_minus_1 >> 32) == 0) {
214
+ _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1);
215
+ } else {
216
+ _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32));
217
+ l_minus_1 += 32;
218
+ }
219
+ const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2;
220
+ #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
221
+ uint64_t l_minus_1;
222
+ __asm__("BSRQ %[d_minus_1], %[l_minus_1]"
223
+ : [l_minus_1] "=r" (l_minus_1)
224
+ : [d_minus_1] "r" (d - 1)
225
+ : "cc");
226
+ #elif defined(__GNUC__)
227
+ const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1);
228
+ const uint32_t nlz_d = __builtin_clzll(d);
229
+ #else
230
+ /* Based on Algorithm 2 from Hacker's delight */
231
+ const uint64_t d_minus_1 = d - 1;
232
+ const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0;
233
+ uint32_t l_minus_1 = 0;
234
+ uint32_t x = (uint32_t) d_minus_1;
235
+ uint32_t y = d_minus_1 >> 32;
236
+ if (y != 0) {
237
+ l_minus_1 += 32;
238
+ x = y;
239
+ }
240
+ y = x >> 16;
241
+ if (y != 0) {
242
+ l_minus_1 += 16;
243
+ x = y;
244
+ }
245
+ y = x >> 8;
246
+ if (y != 0) {
247
+ l_minus_1 += 8;
248
+ x = y;
249
+ }
250
+ y = x >> 4;
251
+ if (y != 0) {
252
+ l_minus_1 += 4;
253
+ x = y;
254
+ }
255
+ y = x >> 2;
256
+ if (y != 0) {
257
+ l_minus_1 += 2;
258
+ x = y;
259
+ }
260
+ if ((x & 2) != 0) {
261
+ l_minus_1 += 1;
262
+ }
263
+ const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2;
264
+ #endif
265
+ uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d;
266
+
267
+ /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */
268
+ #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
269
+ uint64_t q;
270
+ __asm__("DIVQ %[d]"
271
+ : "=a" (q), "+d" (u_hi)
272
+ : [d] "r" (d), "a" (UINT64_C(0))
273
+ : "cc");
274
+ #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__)
275
+ /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */
276
+ const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d));
277
+ #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64)
278
+ unsigned __int64 remainder;
279
+ const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder);
280
+ #else
281
+ /* Implementation based on code from Hacker's delight */
282
+
283
+ /* Normalize divisor and shift divident left */
284
+ d <<= nlz_d;
285
+ u_hi <<= nlz_d;
286
+ /* Break divisor up into two 32-bit digits */
287
+ const uint64_t d_hi = (uint32_t) (d >> 32);
288
+ const uint32_t d_lo = (uint32_t) d;
289
+
290
+ /* Compute the first quotient digit, q1 */
291
+ uint64_t q1 = u_hi / d_hi;
292
+ uint64_t r1 = u_hi - q1 * d_hi;
293
+
294
+ while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) {
295
+ q1 -= 1;
296
+ r1 += d_hi;
297
+ if ((r1 >> 32) != 0) {
298
+ break;
299
+ }
300
+ }
301
+
302
+ /* Multiply and subtract. */
303
+ u_hi = (u_hi << 32) - q1 * d;
304
+
305
+ /* Compute the second quotient digit, q0 */
306
+ uint64_t q0 = u_hi / d_hi;
307
+ uint64_t r0 = u_hi - q0 * d_hi;
308
+
309
+ while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) {
310
+ q0 -= 1;
311
+ r0 += d_hi;
312
+ if ((r0 >> 32) != 0) {
313
+ break;
314
+ }
315
+ }
316
+ const uint64_t q = (q1 << 32) | (uint32_t) q0;
317
+ #endif
318
+ result.m = q + UINT64_C(1);
319
+ result.s1 = 1;
320
+ result.s2 = (uint8_t) l_minus_1;
321
+ }
322
+ return result;
323
+ }
324
+
325
+ static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) {
326
+ #if SIZE_MAX == UINT32_MAX
327
+ const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d);
328
+ #elif SIZE_MAX == UINT64_MAX
329
+ const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d);
330
+ #else
331
+ #error Unsupported platform
332
+ #endif
333
+ struct fxdiv_divisor_size_t size_result = {
334
+ (size_t) uint_result.value,
335
+ (size_t) uint_result.m,
336
+ uint_result.s1,
337
+ uint_result.s2
338
+ };
339
+ return size_result;
340
+ }
341
+
342
+ static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
343
+ const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m);
344
+ return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
345
+ }
346
+
347
+ static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
348
+ const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m);
349
+ return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
350
+ }
351
+
352
+ static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
353
+ #if SIZE_MAX == UINT32_MAX
354
+ const struct fxdiv_divisor_uint32_t uint32_divisor = {
355
+ (uint32_t) divisor.value,
356
+ (uint32_t) divisor.m,
357
+ divisor.s1,
358
+ divisor.s2
359
+ };
360
+ return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor);
361
+ #elif SIZE_MAX == UINT64_MAX
362
+ const struct fxdiv_divisor_uint64_t uint64_divisor = {
363
+ (uint64_t) divisor.value,
364
+ (uint64_t) divisor.m,
365
+ divisor.s1,
366
+ divisor.s2
367
+ };
368
+ return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor);
369
+ #else
370
+ #error Unsupported platform
371
+ #endif
372
+ }
373
+
374
+ static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
375
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
376
+ return n - quotient * divisor.value;
377
+ }
378
+
379
+ static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
380
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
381
+ return n - quotient * divisor.value;
382
+ }
383
+
384
+ static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
385
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
386
+ return n - quotient * divisor.value;
387
+ }
388
+
389
+ static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) {
390
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity);
391
+ return quotient * granularity.value;
392
+ }
393
+
394
+ static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) {
395
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity);
396
+ return quotient * granularity.value;
397
+ }
398
+
399
+ static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) {
400
+ const size_t quotient = fxdiv_quotient_size_t(n, granularity);
401
+ return quotient * granularity.value;
402
+ }
403
+
404
+ static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
405
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
406
+ const uint32_t remainder = n - quotient * divisor.value;
407
+ struct fxdiv_result_uint32_t result = { quotient, remainder };
408
+ return result;
409
+ }
410
+
411
+ static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
412
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
413
+ const uint64_t remainder = n - quotient * divisor.value;
414
+ struct fxdiv_result_uint64_t result = { quotient, remainder };
415
+ return result;
416
+ }
417
+
418
+ static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
419
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
420
+ const size_t remainder = n - quotient * divisor.value;
421
+ struct fxdiv_result_size_t result = { quotient, remainder };
422
+ return result;
423
+ }
424
+
425
+ #endif /* FXDIV_H */
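A minimal sketch of how the precomputed-divisor API above is meant to be used: initialize the divisor once, then reuse it for many divisions. Everything referenced is defined in this header; the constant 7 and the loop bound are arbitrary.

#include <stdio.h>
#include <stdint.h>
#include "fxdiv.h"

int main(void) {
  /* Precompute the magic constants for a runtime divisor once... */
  const struct fxdiv_divisor_uint32_t seven = fxdiv_init_uint32_t(7);
  uint32_t total = 0;
  for (uint32_t n = 0; n < 1000; n++) {
    /* ...then divide many values by it without a hardware division per element. */
    const struct fxdiv_result_uint32_t qr = fxdiv_divide_uint32_t(n, seven);
    total += qr.remainder; /* same result as n % 7 */
  }
  printf("sum of n %% 7 for n < 1000: %u\n", (unsigned) total);
  printf("1000 / 7 = %u\n", (unsigned) fxdiv_quotient_uint32_t(1000, seven));
  return 0;
}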
venv/lib/python3.10/site-packages/torch/include/libshm.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+
+ #include <ATen/MapAllocator.h>
+
+ #ifdef __cplusplus
+
+ void libshm_init(const char* manager_exec_path);
+
+ // Superclass to run a constructor before at::RefcountedMapAllocator
+ class THManagedMapAllocatorInit {
+ protected:
+ THManagedMapAllocatorInit(const char* manager_handle, const char* filename);
+ std::string manager_handle_;
+ };
+
+ // Like a at::RefcountedMapAllocator, but it also makes use of an external
+ // shared memory manager process to ensure that shared memory regions actually
+ // get freed in the end (even if processes lose the memory).
+ class THManagedMapAllocator : private THManagedMapAllocatorInit,
+ public at::RefcountedMapAllocator {
+ public:
+ THManagedMapAllocator(
+ const char* manager_handle,
+ const char* filename,
+ int flags,
+ size_t size);
+
+ void close() override;
+
+ ~THManagedMapAllocator() override {
+ close();
+ }
+
+ static at::DataPtr makeDataPtr(
+ const char* manager_handle,
+ const char* filename,
+ int flags,
+ size_t size);
+ static THManagedMapAllocator* fromDataPtr(const at::DataPtr&);
+
+ const char* manager_handle() const {
+ return manager_handle_.c_str();
+ }
+ };
+
+ #endif
venv/lib/python3.10/site-packages/torch/include/nnpack.h ADDED
@@ -0,0 +1,659 @@
1
+ #pragma once
2
+
3
+ #include <stddef.h>
4
+ #include <stdint.h>
5
+ #include <stdbool.h>
6
+
7
+ #include <pthreadpool.h>
8
+
9
+ #ifdef __cplusplus
10
+ extern "C" {
11
+ #endif
12
+
13
+ /**
14
+ * @brief Status code for any NNPACK function call.
15
+ */
16
+ enum nnp_status {
17
+ /** The call succeeded, and all output arguments now contain valid data. */
18
+ nnp_status_success = 0,
19
+ /** NNPACK function was called with batch_size == 0. */
20
+ nnp_status_invalid_batch_size = 2,
21
+ /** NNPACK function was called with channels == 0. */
22
+ nnp_status_invalid_channels = 3,
23
+ /** NNPACK function was called with input_channels == 0. */
24
+ nnp_status_invalid_input_channels = 4,
25
+ /** NNPACK function was called with output_channels == 0. */
26
+ nnp_status_invalid_output_channels = 5,
27
+ /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */
28
+ nnp_status_invalid_input_size = 10,
29
+ /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */
30
+ nnp_status_invalid_input_stride = 11,
31
+ /** NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.:
32
+ *
33
+ * - input_padding.left >= kernel_size.width (>= pooling_size.width)
34
+ * - input_padding.right >= kernel_size.width (>= pooling_size.width)
35
+ * - input_padding.top >= kernel_size.height (>= pooling_size.height)
36
+ * - input_padding.bottom >= kernel_size.height (>= pooling_size.height)
37
+ */
38
+ nnp_status_invalid_input_padding = 12,
39
+ /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */
40
+ nnp_status_invalid_kernel_size = 13,
41
+ /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */
42
+ nnp_status_invalid_pooling_size = 14,
43
+ /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */
44
+ nnp_status_invalid_pooling_stride = 15,
45
+ /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */
46
+ nnp_status_invalid_algorithm = 16,
47
+ /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */
48
+ nnp_status_invalid_transform_strategy = 17,
49
+ /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */
50
+ nnp_status_invalid_output_subsampling = 13,
51
+ /** NNPACK function was called with activation not in nnp_activation enum */
52
+ nnp_status_invalid_activation = 14,
53
+ /** NNPACK function was called with invalid activation parameters */
54
+ nnp_status_invalid_activation_parameters = 15,
55
+
56
+ /** NNPACK does not support the particular input size for the function */
57
+ nnp_status_unsupported_input_size = 20,
58
+ /** NNPACK does not support the particular input stride for the function */
59
+ nnp_status_unsupported_input_stride = 21,
60
+ /** NNPACK does not support the particular input padding for the function */
61
+ nnp_status_unsupported_input_padding = 22,
62
+ /** NNPACK does not support the particular kernel size for the function */
63
+ nnp_status_unsupported_kernel_size = 23,
64
+ /** NNPACK does not support the particular pooling size for the function */
65
+ nnp_status_unsupported_pooling_size = 24,
66
+ /** NNPACK does not support the particular pooling stride for the function */
67
+ nnp_status_unsupported_pooling_stride = 25,
68
+ /** NNPACK does not support the particular convolution algorithm for the function */
69
+ nnp_status_unsupported_algorithm = 26,
70
+ /** NNPACK does not support the particular convolution transform strategy for the algorithm */
71
+ nnp_status_unsupported_transform_strategy = 27,
72
+ /** NNPACK does not support the particular activation function for the function */
73
+ nnp_status_unsupported_activation = 28,
74
+ /** NNPACK does not support the particular activation function parameters for the function */
75
+ nnp_status_unsupported_activation_parameters = 29,
76
+
77
+ /** NNPACK function was called before the library was initialized */
78
+ nnp_status_uninitialized = 50,
79
+ /** NNPACK does not implement this function for the host CPU */
80
+ nnp_status_unsupported_hardware = 51,
81
+ /** NNPACK failed to allocate memory for temporary buffers */
82
+ nnp_status_out_of_memory = 52,
83
+ /** Scratch space buffer is too small */
84
+ nnp_status_insufficient_buffer = 53,
85
+ /** Scratch space buffer is not properly aligned */
86
+ nnp_status_misaligned_buffer = 54
87
+ };
88
+
89
+ /**
90
+ * @brief Activation applied after a convolutional or fully-connected layer.
91
+ */
92
+ enum nnp_activation {
93
+ /** Identity activation f(x) := x, i.e. no transformation */
94
+ nnp_activation_identity = 0,
95
+ /** ReLU activation f(x) := max(0, x) */
96
+ nnp_activation_relu = 1,
97
+ };
98
+
99
+ /**
100
+ * @brief Algorithm for computing convolutional layers.
101
+ */
102
+ enum nnp_convolution_algorithm {
103
+ /** Let NNPACK choose the algorithm depending on layer parameters */
104
+ nnp_convolution_algorithm_auto = 0,
105
+ /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */
106
+ nnp_convolution_algorithm_ft8x8 = 1,
107
+ /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */
108
+ nnp_convolution_algorithm_ft16x16 = 2,
109
+ /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */
110
+ nnp_convolution_algorithm_wt8x8 = 3,
111
+ /** Direct convolution via implicit GEMM. */
112
+ nnp_convolution_algorithm_implicit_gemm = 4,
113
+ /** Direct convolution implementation. */
114
+ nnp_convolution_algorithm_direct = 5,
115
+ /**
116
+ * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16.
117
+ * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP),
118
+ * on non-supported processors falls back to nnp_convolution_algorithm_wt8x8.
119
+ */
120
+ nnp_convolution_algorithm_wt8x8_fp16 = 6,
121
+ };
122
+
123
+ enum nnp_convolution_transform_strategy {
124
+ nnp_convolution_transform_strategy_compute = 1,
125
+ nnp_convolution_transform_strategy_precompute = 2,
126
+ nnp_convolution_transform_strategy_reuse = 3
127
+ };
128
+
129
+ /* For backward compatibility */
130
+ #define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute
131
+ #define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute
132
+
133
+ /**
134
+ * @brief Size of images, kernels, and pooling filters in NNPACK.
135
+ */
136
+ struct nnp_size {
137
+ /** Width (horizontal size) of an image, kernel, or pooling filter. */
138
+ size_t width;
139
+ /** Height (vertical size) of an image, kernel, or pooling filter. */
140
+ size_t height;
141
+ };
142
+
143
+ /**
144
+ * @brief Padding of images in NNPACK.
145
+ */
146
+ struct nnp_padding {
147
+ /** Padding above the image data */
148
+ size_t top;
149
+ /** Padding on the right of image data */
150
+ size_t right;
151
+ /** Padding below the image data */
152
+ size_t bottom;
153
+ /** Padding on the left of image data */
154
+ size_t left;
155
+ };
156
+
157
+ /**
158
+ * @brief Profiling information about time spent in different phases of a function call.
159
+ */
160
+ struct nnp_profile {
161
+ /** Time spent inside the function call, in seconds. */
162
+ double total;
163
+ /** Time spent on transformation of the input or input gradient tensor, in seconds. */
164
+ double input_transform;
165
+ /** Time spent on transformation of the kernel or kernel gradient tensor, in seconds. */
166
+ double kernel_transform;
167
+ /** Time spent on transformation of the output or output gradient tensor, in seconds. */
168
+ double output_transform;
169
+ /** Time spent on multiplication-accumulation of transformed coefficients, in seconds. */
170
+ double block_multiplication;
171
+ };
172
+
173
+ enum nnp_status nnp_initialize(void);
174
+
175
+ enum nnp_status nnp_deinitialize(void);
176
+
177
+ /**
178
+ * @brief Computes output of a 2D convolutional layer from input and kernel tensors.
179
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
180
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
181
+ * For minibatch size 1, use nnp_convolution_inference for optimal performance.
182
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
183
+ *
184
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
185
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
186
+ * Supports kernels up to 8x8.
187
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
188
+ * Supports kernels up to 16x16.
189
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
190
+ * Supports only 3x3 kernels.
191
+ *
192
+ * @param batch_size The number of images on the input and output of the convolutional layer.
193
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images.
194
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images.
195
+ * @param input_size Size of input images, excluding implicit zero-padding.
196
+ * @param input_padding Implicit zero-padding of input images.
197
+ * @param kernel_size Kernel size.
198
+ * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width].
199
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
200
+ * @param[in] bias A 1D array bias[output_channels].
201
+ * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where
202
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
203
+ * (kernel_size.height - 1)
204
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
205
+ * (kernel_size.width - 1)
206
+ * @param threadpool A thread pool for parallelization of the computation.
207
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
208
+ * @param[out] profile An optional pointer to profiling structure.
209
+ * If provided, the structure would record time spent in different phases of the computation.
210
+ */
211
+
212
+ enum nnp_status nnp_convolution_output(
213
+ enum nnp_convolution_algorithm algorithm,
214
+ size_t batch_size,
215
+ size_t input_channels,
216
+ size_t output_channels,
217
+ struct nnp_size input_size,
218
+ struct nnp_padding input_padding,
219
+ struct nnp_size kernel_size,
220
+ const float* input,
221
+ const float* kernel,
222
+ const float* bias,
223
+ float* output,
224
+ void* workspace_buffer,
225
+ size_t* workspace_size,
226
+ enum nnp_activation activation,
227
+ const void* activation_parameters,
228
+ pthreadpool_t threadpool,
229
+ struct nnp_profile* profile);
230
+
231
+ /**
232
+ * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors.
233
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
234
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
235
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
236
+ *
237
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
238
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
239
+ * Supports kernels up to 8x8.
240
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
241
+ * Supports kernels up to 16x16.
242
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
243
+ * Supports only 3x3 kernels.
244
+ *
245
+ * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer.
246
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients).
247
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients).
248
+ * @param input_size Size of input images and their gradients, excluding implicit zero-padding.
249
+ * @param input_padding Implicit zero-padding of input images.
250
+ * @param kernel_size Kernel size.
251
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width]
252
+ * where
253
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
254
+ * (kernel_size.height - 1)
255
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
256
+ * (kernel_size.width - 1)
257
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
258
+ * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width].
259
+ * @param threadpool A thread pool for parallelization of the computation.
260
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
261
+ * @param[out] profile An optional pointer to profiling structure.
262
+ * If provided, the structure would record time spent in different phases of the computation.
263
+ */
264
+ enum nnp_status nnp_convolution_input_gradient(
265
+ enum nnp_convolution_algorithm algorithm,
266
+ size_t batch_size,
267
+ size_t input_channels,
268
+ size_t output_channels,
269
+ struct nnp_size input_size,
270
+ struct nnp_padding input_padding,
271
+ struct nnp_size kernel_size,
272
+ const float* grad_output,
273
+ const float* kernel,
274
+ float* grad_input,
275
+ void* workspace_buffer,
276
+ size_t* workspace_size,
277
+ enum nnp_activation activation,
278
+ const void* activation_parameters,
279
+ pthreadpool_t threadpool,
280
+ struct nnp_profile* profile);
281
+
282
+ /**
283
+ * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors.
284
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
285
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
286
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
287
+ *
288
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
289
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
290
+ * Supports kernels up to 8x8.
291
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
292
+ * Supports kernels up to 16x16.
293
+ *
294
+ * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer.
295
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images.
296
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients).
297
+ * @param input_size Size of input images and their gradients, excluding implicit zero-padding.
298
+ * @param input_padding Implicit zero-padding of input images.
299
+ * @param kernel_size Kernel size.
300
+ * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width].
301
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width]
302
+ * where
303
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
304
+ * (kernel_size.height - 1)
305
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
306
+ * (kernel_size.width - 1)
307
+ * @param[out] grad_kernel A 4D tensor
308
+ * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
309
+ * @param threadpool A thread pool for parallelization of the computation.
310
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
311
+ * @param[out] profile An optional pointer to profiling structure.
312
+ * If provided, the structure would record time spent in different phases of the computation.
313
+ */
314
+ enum nnp_status nnp_convolution_kernel_gradient(
315
+ enum nnp_convolution_algorithm algorithm,
316
+ size_t batch_size,
317
+ size_t input_channels,
318
+ size_t output_channels,
319
+ struct nnp_size input_size,
320
+ struct nnp_padding input_padding,
321
+ struct nnp_size kernel_size,
322
+ const float* input,
323
+ const float* grad_output,
324
+ float* grad_kernel,
325
+ void* workspace_buffer,
326
+ size_t* workspace_size,
327
+ enum nnp_activation activation,
328
+ const void* activation_parameters,
329
+ pthreadpool_t threadpool,
330
+ struct nnp_profile* profile);
331
+
332
+ /**
+ * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
+ *
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
+ *   Supports kernels up to 8x8.
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
+ *   Supports kernels up to 16x16.
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
+ *   Supports only 3x3 kernels.
+ *
+ * @param transform_strategy A strategy that guides computation of kernel transform coefficients.
+ * Possible values are:
+ *
+ * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed
+ *   coefficients.
+ * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed
+ *   coefficients.
+ *
+ * @param input_channels The number of channels (AKA features, dimensions) in the input image.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output image.
+ * @param input_size Size of input image, excluding implicit zero-padding.
+ * @param input_padding Implicit zero-padding of input image.
+ * @param kernel_size Kernel size.
+ * @param output_subsampling Subsample region for output, also known as convolution stride.
+ * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width].
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
+ * @param[in] bias A 1D array bias[output_channels].
+ * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
+ * (kernel_size.height - 1)
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
+ * (kernel_size.width - 1)
+ * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes.
+ * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size
+ * of required workspace memory at the workspace_size location, and exit without
+ * computations.
+ * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory
+ * before and deallocate after this computation, potentially at significant runtime cost.
+ * @param[in,out] workspace_size Pointer to the size of workspace buffer.
+ * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to
+ * the location specified by this pointer.
+ * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of
+ * the buffer, in bytes.
+ * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK
+ * would allocate memory before and deallocate after this computation, potentially at
+ * significant runtime cost.
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ * @param[out] profile An optional pointer to profiling structure.
+ * If provided, the structure would record time spent in different phases of the computation.
+ */
+ enum nnp_status nnp_convolution_inference(
+     enum nnp_convolution_algorithm algorithm,
+     enum nnp_convolution_transform_strategy transform_strategy,
+     size_t input_channels,
+     size_t output_channels,
+     struct nnp_size input_size,
+     struct nnp_padding input_padding,
+     struct nnp_size kernel_size,
+     struct nnp_size output_subsampling,
+     const float* input,
+     const float* kernel,
+     const float* bias,
+     float* output,
+     void* workspace_buffer,
+     size_t* workspace_size,
+     enum nnp_activation activation,
+     const void* activation_parameters,
+     pthreadpool_t threadpool,
+     struct nnp_profile* profile);
+
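The two-call workspace protocol documented above is easiest to see in code. The following is an editorial usage sketch, not part of the diffed header: the layer dimensions are made up, nnp_initialize() is assumed to have been called successfully, C11 aligned_alloc is used to satisfy the 64-byte alignment requirement, and the status codes nnp_status_success / nnp_status_out_of_memory are from the nnp_status enum declared earlier in the header (not shown in this hunk).

    #include <stdlib.h>   /* aligned_alloc(), free() */
    #include <nnpack.h>

    /* Hypothetical 3x3 convolution, 64 -> 128 channels on a 56x56 image, unit stride. */
    static enum nnp_status conv3x3_inference_example(
        const float* input,   /* [64][56][56] */
        const float* kernel,  /* [128][64][3][3] */
        const float* bias,    /* [128] */
        float* output)        /* [128][56][56] */
    {
        const struct nnp_size input_size = { .width = 56, .height = 56 };
        const struct nnp_padding input_padding = { .top = 1, .right = 1, .bottom = 1, .left = 1 };
        const struct nnp_size kernel_size = { .width = 3, .height = 3 };
        const struct nnp_size output_subsampling = { .width = 1, .height = 1 };

        /* Call 1: workspace_buffer == NULL, workspace_size != NULL.
         * NNPACK only reports the required scratch size; no computation happens. */
        size_t workspace_size = 0;
        enum nnp_status status = nnp_convolution_inference(
            nnp_convolution_algorithm_auto,
            nnp_convolution_transform_strategy_block_based,
            64, 128,
            input_size, input_padding, kernel_size, output_subsampling,
            input, kernel, bias, output,
            NULL, &workspace_size,
            nnp_activation_identity, NULL,
            NULL /* threadpool: single-threaded */, NULL /* profile */);
        if (status != nnp_status_success)
            return status;

        /* Call 2: pass a 64-byte-aligned buffer of (at least) the reported size. */
        void* workspace = aligned_alloc(64, (workspace_size + 63) & ~(size_t) 63);
        if (workspace == NULL)
            return nnp_status_out_of_memory;
        status = nnp_convolution_inference(
            nnp_convolution_algorithm_auto,
            nnp_convolution_transform_strategy_block_based,
            64, 128,
            input_size, input_padding, kernel_size, output_subsampling,
            input, kernel, bias, output,
            workspace, &workspace_size,
            nnp_activation_identity, NULL,
            NULL, NULL);
        free(workspace);
        return status;
    }

Passing NULL for both workspace_buffer and workspace_size is also valid, as the comment above states; NNPACK then allocates and frees the scratch memory internally on every call, which is convenient but slower when the layer is executed repeatedly.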
+ /**
+ * @brief Computes output of a fully connected layer from input and kernel matrices.
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
+ * For minibatch size 1, use nnp_fully_connected_inference for optimal performance.
+ * @param batch_size The number of vectors on the input and output of the fully connected layer.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input matrix.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output matrix.
+ * @param[in] input A 2D matrix input[batch_size][input_channels].
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels].
+ * @param[out] output A 2D matrix output[batch_size][output_channels].
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ * @param[out] profile An optional pointer to profiling structure.
+ * If provided, the structure would record time spent in different phases of the computation.
+ */
+ enum nnp_status nnp_fully_connected_output(
+     size_t batch_size,
+     size_t input_channels,
+     size_t output_channels,
+     const float input[],
+     const float kernel[],
+     float output[],
+     pthreadpool_t threadpool,
+     struct nnp_profile* profile);
+
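Read together, the layouts above mean nnp_fully_connected_output computes an ordinary matrix product against the transposed kernel; this is stated here as an editorial note on the documented layouts, not quoted from the header:

    /* For every minibatch row b and output channel o:
     *     output[b][o] = sum_i input[b][i] * kernel[o][i]
     * i.e. output = input * kernel^T, with the kernel stored row-major as
     * kernel[output_channels][input_channels]. */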
+ /**
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements.
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+ enum nnp_status nnp_fully_connected_inference(
+     size_t input_channels,
+     size_t output_channels,
+     const float* input,
+     const float* kernel,
+     float* output,
+     pthreadpool_t threadpool);
+
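A minimal, hypothetical call for the single-vector case (the 1024-to-10 sizes are made up, and the library is assumed to have been initialized with nnp_initialize() beforehand):

    #include <nnpack.h>

    /* Score one 1024-dimensional feature vector against a 10x1024 weight matrix. */
    static enum nnp_status classify_one(
        const float features[1024],
        const float weights[10 * 1024],
        float scores[10])
    {
        /* NULL threadpool: run on the caller thread without parallelization. */
        return nnp_fully_connected_inference(1024, 10, features, weights, scores, NULL);
    }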
+ /**
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements.
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+ enum nnp_status nnp_fully_connected_inference_f16f32(
+     size_t input_channels,
+     size_t output_channels,
+     const float* input,
+     const void* kernel,
+     float* output,
+     pthreadpool_t threadpool);
+
+ /**
+ * @brief Computes output of a max-pooling layer for an input tensor.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ * propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of images on the input and output of the max-pooling layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output images.
+ * @param input_size Size of input images, excluding implicit zero-padding.
+ * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but
+ * affect the output size.
+ * @param pooling_size Size of the pooling filter. Only 2x2 filters are currently supported.
+ * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported.
+ * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width].
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where
+ * output_size.height = ceil(
+ * (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) /
+ * pooling_stride.height) + 1
+ * output_size.width = ceil(
+ * (input_padding.left + input_size.width + input_padding.right - pooling_size.width) /
+ * pooling_stride.width) + 1
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+ enum nnp_status nnp_max_pooling_output(
+     size_t batch_size,
+     size_t channels,
+     struct nnp_size input_size,
+     struct nnp_padding input_padding,
+     struct nnp_size pooling_size,
+     struct nnp_size pooling_stride,
+     const float input[],
+     float output[],
+     pthreadpool_t threadpool);
+
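A quick worked instance of the output-size formula above (numbers are illustrative only): for an unpadded 13x13 input with a 2x2 pooling filter and 2x2 stride,

    output_size.height = ceil((0 + 13 + 0 - 2) / 2) + 1 = ceil(5.5) + 1 = 7

so an odd 13x13 map pools down to 7x7, with the last pooling window hanging over the bottom/right edge because of the ceil(). For an even 224x224 input the same formula gives ceil(222 / 2) + 1 = 112, i.e. the familiar halving.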
+ /**
+ * @brief Computes output of a softmax layer for an input matrix.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ * propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the softmax layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output vectors.
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] output A 2D matrix output[batch_size][channels].
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+ enum nnp_status nnp_softmax_output(
+     size_t batch_size,
+     size_t channels,
+     const float input[],
+     float output[],
+     pthreadpool_t threadpool);
+
+ /**
+ * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ * propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] output A 2D matrix output[batch_size][channels].
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+ enum nnp_status nnp_relu_output(
+     size_t batch_size,
+     size_t channels,
+     const float input[],
+     float output[],
+     float negative_slope,
+     pthreadpool_t threadpool);
+
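The negative_slope argument is not covered by the comment above. With a non-zero slope this is the usual leaky-ReLU variant; the following element-wise definition is the conventional one, stated as an editorial reading of the signature rather than quoted from the header:

    /* output = (input >= 0) ? input : negative_slope * input;
     * negative_slope == 0 reduces to the plain ReLU max(input, 0). */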
+ /**
+ * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices.
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
+ * It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
+ * @param[in] grad_output A 2D matrix grad_output[batch_size][channels].
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] grad_input A 2D matrix grad_input[batch_size][channels].
+ * @param threadpool A thread pool for parallelization of the computation.
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+ enum nnp_status nnp_relu_input_gradient(
+     size_t batch_size,
+     size_t channels,
+     const float grad_output[],
+     const float input[],
+     float grad_input[],
+     float negative_slope,
+     pthreadpool_t threadpool);
+
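Correspondingly, the backward pass routes the output gradient through the same element-wise mask; again an editorial reading of the signature, assuming the conventional leaky-ReLU derivative:

    /* grad_input = (input >= 0) ? grad_output : negative_slope * grad_output; */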
+ #ifdef __cplusplus
+ } /* extern "C" */
+ #endif
+
+ #ifdef __cplusplus
+ // Backward compatible implementations for nnp_convolution_*, if we are in C++
+ // mode.
+ inline enum nnp_status nnp_convolution_output(
+     enum nnp_convolution_algorithm algorithm,
+     size_t batch_size,
+     size_t input_channels,
+     size_t output_channels,
+     struct nnp_size input_size,
+     struct nnp_padding input_padding,
+     struct nnp_size kernel_size,
+     const float input[],
+     const float kernel[],
+     const float bias[],
+     float output[],
+     pthreadpool_t threadpool,
+     struct nnp_profile* profile)
+ {
+     return nnp_convolution_output(
+         algorithm,
+         batch_size, input_channels, output_channels,
+         input_size, input_padding, kernel_size,
+         input, kernel, bias, output,
+         NULL, NULL,
+         nnp_activation_identity, NULL, threadpool, profile);
+ }
+
+ inline enum nnp_status nnp_convolution_input_gradient(
+     enum nnp_convolution_algorithm algorithm,
+     size_t batch_size,
+     size_t input_channels,
+     size_t output_channels,
+     struct nnp_size input_size,
+     struct nnp_padding input_padding,
+     struct nnp_size kernel_size,
+     const float grad_output[],
+     const float kernel[],
+     float grad_input[],
+     pthreadpool_t threadpool,
+     struct nnp_profile* profile)
+ {
+     return nnp_convolution_input_gradient(
+         algorithm,
+         batch_size, input_channels, output_channels,
+         input_size, input_padding, kernel_size,
+         grad_output, kernel, grad_input,
+         NULL, NULL,
+         nnp_activation_identity, NULL, threadpool, profile);
+ }
+
+ inline enum nnp_status nnp_convolution_kernel_gradient(
+     enum nnp_convolution_algorithm algorithm,
+     size_t batch_size,
+     size_t input_channels,
+     size_t output_channels,
+     struct nnp_size input_size,
+     struct nnp_padding input_padding,
+     struct nnp_size kernel_size,
+     const float input[],
+     const float grad_output[],
+     float grad_kernel[],
+     pthreadpool_t threadpool,
+     struct nnp_profile* profile)
+ {
+     return nnp_convolution_kernel_gradient(
+         algorithm,
+         batch_size, input_channels, output_channels,
+         input_size, input_padding, kernel_size,
+         input, grad_output, grad_kernel,
+         NULL, NULL,
+         nnp_activation_identity, NULL, threadpool, profile);
+ }
+
+ inline enum nnp_status nnp_convolution_inference(
+     enum nnp_convolution_algorithm algorithm,
+     enum nnp_convolution_transform_strategy transform_strategy,
+     size_t input_channels,
+     size_t output_channels,
+     struct nnp_size input_size,
+     struct nnp_padding input_padding,
+     struct nnp_size kernel_size,
+     struct nnp_size output_subsampling,
+     const float input[],
+     const float kernel[],
+     const float bias[],
+     float output[],
+     pthreadpool_t threadpool,
+     struct nnp_profile* profile) {
+     return nnp_convolution_inference(
+         algorithm, transform_strategy,
+         input_channels, output_channels,
+         input_size, input_padding, kernel_size, output_subsampling,
+         input, kernel, bias, output, NULL, NULL,
+         nnp_activation_identity, NULL,
+         threadpool, profile);
+ }
+
+ #endif // __cplusplus
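In C++ translation units the inline definitions above overload the extern "C" declarations, so call sites written against the older, shorter signatures keep compiling: the overloads simply forward with NULL workspace pointers (internal allocation) and nnp_activation_identity. A hypothetical call through the short form, reusing the made-up sizes and variable names from the earlier sketch:

    /* C++ only: resolves to the 13-argument inline overload above, which forwards
     * to the full extern "C" nnp_convolution_output with identity activation and
     * internally managed workspace memory. */
    enum nnp_status status = nnp_convolution_output(
        nnp_convolution_algorithm_auto,
        /*batch_size=*/16, /*input_channels=*/64, /*output_channels=*/128,
        input_size, input_padding, kernel_size,
        input, kernel, bias, output,
        /*threadpool=*/NULL, /*profile=*/NULL);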