applied-ai-018 committed (verified)
Commit 7d35f20 · 1 Parent(s): 1423071

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_decomp/__init__.py +444 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions.py +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py +302 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py +263 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__init__.py +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_dispatch/python.py +178 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh +3 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh +3 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h +57 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h +44 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h +195 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h +117 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Event.h +121 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h +84 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h +75 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h +46 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h +51 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h +82 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h +672 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h +165 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h +88 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h +207 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/alignment.h +21 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/clog.h +100 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/cpuinfo.h +1939 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl.h +22 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_config.h +22 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_debug.h +22 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_ocl.h +22 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl.h +22 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h +22 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h +22 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_types.h +22 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_version.h +22 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/fp16.h +11 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/fxdiv.h +425 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/libshm.h +46 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/nnpack.h +659 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/psimd.h +1384 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/pthreadpool.h +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack.h +336 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack_func.h +166 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/sleef.h +0 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/xnnpack.h +0 -0
env-llmeval/lib/python3.10/site-packages/torch/_decomp/__init__.py ADDED
@@ -0,0 +1,444 @@
1
+ import inspect
2
+ from collections import defaultdict
3
+ from functools import wraps
4
+ from itertools import chain
5
+ from typing import Callable, Dict, List, Sequence, Union
6
+
7
+ import torch
8
+ import torch.library
9
+ from torch._ops import HigherOrderOperator, OpOverload, OpOverloadPacket
10
+ from torch._prims_common import CustomOutParamAnnotation
11
+ from torch.utils import _pytree as pytree
12
+
13
+ __all__ = [
14
+ "decomposition_table",
15
+ "pre_autograd_decomposition_table",
16
+ "meta_table",
17
+ "register_decomposition",
18
+ "get_decompositions",
19
+ "core_aten_decompositions",
20
+ ]
21
+
22
+
23
+ # TODO: relax key type here; torch registrations should be possible to; but
24
+ # right now this type is accurate
25
+ global_decomposition_table: Dict[
26
+ str, Dict[torch._ops.OperatorBase, Callable]
27
+ ] = defaultdict(dict)
28
+
29
+ decomposition_table = global_decomposition_table["post_autograd"]
30
+ pre_autograd_decomposition_table = global_decomposition_table["pre_autograd"]
31
+ meta_table = global_decomposition_table["meta"]
32
+
33
+
34
+ def _add_op_to_registry(registry, op, fn):
35
+ """
36
+ This is an internal API for adding an op to the decomposition table.
37
+
38
+ If op is OpOverload, it will be added to the registry directly.
39
+ If op is OpOverloadPacket, all the valid op_overloads in the packet will be added to the registry.
40
+ """
41
+ overloads: List[Union[torch._ops.OperatorBase]] = []
42
+ if isinstance(op, HigherOrderOperator):
43
+ # There's no concept of overloads for HigherOrderOperator
44
+ registry[op] = fn
45
+ return
46
+ elif isinstance(op, OpOverload):
47
+ overloads.append(op)
48
+ else:
49
+ assert isinstance(op, OpOverloadPacket)
50
+ for ol in op.overloads():
51
+ overloads.append(getattr(op, ol))
52
+
53
+ for op_overload in overloads:
54
+ if op_overload in registry:
55
+ raise RuntimeError(f"duplicate registrations for {op_overload}")
56
+ # TorchScript dumps a bunch of extra nonsense overloads
57
+ # which don't have corresponding dispatcher entries, we need
58
+ # to filter those out, e.g aten.add.float_int
59
+ if torch._C._dispatch_has_kernel(op_overload.name()):
60
+ registry[op_overload] = fn
61
+
62
+
63
+ def _convert_out_params(f):
64
+ out_annotation = f.__annotations__.get("out")
65
+
66
+ # If there are no out params, do not wrap the function.
67
+ if not out_annotation:
68
+ return f
69
+
70
+ # Hack to detect when out is a Tuple. There seems to be no pretty way of doing this
71
+ if getattr(out_annotation, "__origin__", None) is tuple:
72
+ sig = inspect.signature(f)
73
+ out_names = sig.return_annotation._fields
74
+ # If out is a tuple, we need to register a function that unpacks all the out
75
+ # elements as this is what native_functions.yaml expects
76
+
77
+ @wraps(f)
78
+ def _fn(*args, **kwargs):
79
+ out_kwargs = tuple(kwargs.pop(o, None) for o in out_names)
80
+ # Either all of the out kwargs are set or none of them
81
+ is_none = out_kwargs[0] is None
82
+ assert all((o is None) == is_none for o in out_kwargs)
83
+ return f(*args, **kwargs, out=None if is_none else out_kwargs)
84
+
85
+ out_params = [
86
+ inspect.Parameter(
87
+ o,
88
+ kind=inspect.Parameter.KEYWORD_ONLY,
89
+ default=None,
90
+ annotation=t,
91
+ )
92
+ for o, t in zip(out_names, out_annotation.__args__)
93
+ ]
94
+ # Drop the out parameter and concatenate the new kwargs in the signature
95
+ params = chain((v for k, v in sig.parameters.items() if k != "out"), out_params)
96
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
97
+ parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
98
+ )
99
+ # Drop the out parameter and concatenate the new kwargs in the annotations
100
+ _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
101
+ for o in out_params:
102
+ _fn.__annotations__[o.name] = o.annotation
103
+
104
+ # Propagate that this function is wrapped by `out_wrapper`
105
+ _fn._torch_decompositions_out_wrapper = f._torch_decompositions_out_wrapper # type: ignore[attr-defined]
106
+
107
+ return _fn
108
+
109
+ # Alternatively, there may be a single tensor out parameter with a name
110
+ # other than "out". This will need special treatment and is indicated by an
111
+ # annotation, which we will remove here so it is not exposed after wrapping.
112
+ custom_out_param_name = f.__annotations__.pop(CustomOutParamAnnotation, None)
113
+ if custom_out_param_name:
114
+
115
+ @wraps(f)
116
+ def _fn(*args, **kwargs):
117
+ out_kwarg = kwargs.pop(custom_out_param_name, None)
118
+ return f(*args, **kwargs, out=out_kwarg)
119
+
120
+ out_param = inspect.Parameter(
121
+ custom_out_param_name,
122
+ kind=inspect.Parameter.KEYWORD_ONLY,
123
+ default=None,
124
+ annotation=out_annotation,
125
+ )
126
+
127
+ # Drop the out parameter and concatenate the new kwarg in the signature
128
+ sig = inspect.signature(f)
129
+ params = chain(
130
+ (v for k, v in sig.parameters.items() if k != "out"), (out_param,)
131
+ )
132
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
133
+ parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type]
134
+ )
135
+
136
+ # Drop the out parameter and concatenate the new kwargs in the annotations
137
+ _fn.__annotations__ = {k: v for k, v in f.__annotations__.items() if k != "out"}
138
+ _fn.__annotations__[out_param.name] = out_param.annotation
139
+
140
+ return _fn
141
+
142
+ return f
143
+
144
+
145
+ def register_decomposition(
146
+ aten_op, registry=None, *, type="post_autograd", unsafe=False
147
+ ):
148
+ """
149
+ A decorator to register a function as a decomposition to the Python
150
+ decomposition table. Use it like this::
151
+
152
+ @register_decomposition(torch.ops.aten.clamp_min)
153
+ def clamp_min(x):
154
+ return torch.clamp(self, min=min)
155
+
156
+ If you are writing a new decomposition, consider contributing it
157
+ directly to PyTorch in torch._decomp.decompositions.
158
+
159
+ This API is experimental; we are almost certainly going to extend
160
+ the API when we make decompositions eligible for use in transforms (e.g.,
161
+ autograd) and not just backend tracing, where we then need to know if a
162
+ decomposition can be used to simulate a transform.
163
+
164
+ By default, we also will register it to the Meta key of dispatcher,
165
+ and replace the c++ Meta implementation if there is already one.
166
+
167
+ unsafe kwarg is for reuse of this function for registering non-function
168
+ things
169
+ """
170
+
171
+ assert type in {"post_autograd", "pre_autograd", "meta"}
172
+
173
+ def decomposition_decorator(fn: Callable) -> Callable:
174
+ if not unsafe:
175
+ fn = _convert_out_params(fn)
176
+
177
+ nonlocal registry
178
+ if registry is None:
179
+ registry = global_decomposition_table[type]
180
+
181
+ def register(op):
182
+ _add_op_to_registry(registry, op, fn)
183
+
184
+ # To handle allowing multiple aten_ops at once
185
+ pytree.tree_map_(register, aten_op)
186
+ return fn
187
+
188
+ return decomposition_decorator
189
+
190
+
191
+ def get_decompositions(
192
+ aten_ops: Sequence[Union[torch._ops.OperatorBase, OpOverloadPacket]],
193
+ type: str = "post_autograd",
194
+ ) -> Dict[torch._ops.OperatorBase, Callable]:
195
+ """
196
+ Retrieve a dictionary of decompositions corresponding to the list of
197
+ operator overloads and overload packets passed as input. Overload
198
+ packets will include all decomposed overloads in the packet. If there is
199
+ no decomposition for a requested operator, it is silently ignored.
200
+
201
+ This API is experimental; we are almost certainly going to give an alternate,
202
+ more recommended formulation, where a user provides the set of operators
203
+ they know how to implement, and we provide decompositions for everything
204
+ not in this set.
205
+ """
206
+ assert type in {"post_autograd", "pre_autograd", "meta"}
207
+
208
+ registry = global_decomposition_table[type]
209
+ packets_to_overloads = defaultdict(list)
210
+ for opo in registry:
211
+ if isinstance(opo, (OpOverload, OpOverloadPacket)):
212
+ packets_to_overloads[opo.overloadpacket].append(opo)
213
+ decompositions: Dict[torch._ops.OperatorBase, Callable] = {}
214
+ for op in aten_ops:
215
+ if isinstance(op, OpOverloadPacket) and op in packets_to_overloads:
216
+ for op_overload in packets_to_overloads[op]:
217
+ decompositions[op_overload] = registry[op_overload]
218
+ elif isinstance(op, (torch._ops.OperatorBase)) and op in registry:
219
+ decompositions[op] = registry[op]
220
+ return decompositions
221
+
222
+
223
+ def remove_decompositions(
224
+ decompositions: Dict[torch._ops.OperatorBase, Callable],
225
+ aten_ops: Sequence[Union[OpOverload, OpOverloadPacket]],
226
+ ) -> None:
227
+ """
228
+ Given a dictionary of decompositions obtained from get_decompositions(), removes
229
+ operators associated with a list of operator overloads and overload packets passed
230
+ as input. If the decomposition dictionary does not contain a decomposition that is
231
+ specified to be removed, it is silently ignored.
232
+ """
233
+ for op in aten_ops:
234
+ if isinstance(op, OpOverloadPacket):
235
+ for overload_name in op.overloads():
236
+ opo = getattr(op, overload_name)
237
+ decompositions.pop(opo, None)
238
+ elif isinstance(op, OpOverload):
239
+ decompositions.pop(op, None)
240
+
241
+
242
+ # populate the table
243
+ import torch._decomp.decompositions
244
+ import torch._refs
245
+
246
+
247
+ # See NOTE [Core ATen Ops]
248
+ #
249
+ # list was copied from torch/_inductor/decomposition.py
250
+ # excluding decompositions that results in prim ops
251
+ # Resulting opset of decomposition is core aten ops
252
+ def core_aten_decompositions() -> Dict[torch._ops.OperatorBase, Callable]:
253
+ aten = torch.ops.aten
254
+ return get_decompositions(
255
+ [
256
+ aten.addcdiv,
257
+ aten.addcdiv_,
258
+ aten.addcmul,
259
+ aten.addcmul_,
260
+ aten.addr,
261
+ aten.affine_grid_generator,
262
+ aten.all,
263
+ aten.aminmax,
264
+ aten.arange.default,
265
+ aten.arange.start,
266
+ aten.avg_pool2d_backward,
267
+ aten.baddbmm,
268
+ aten.binary_cross_entropy,
269
+ aten.binary_cross_entropy_backward,
270
+ aten.binary_cross_entropy_with_logits,
271
+ aten.celu,
272
+ aten.celu_,
273
+ aten.clamp_max,
274
+ aten.clamp_min,
275
+ aten.col2im,
276
+ aten.count_nonzero,
277
+ aten.cudnn_batch_norm,
278
+ aten.cudnn_batch_norm_backward,
279
+ aten.deg2rad,
280
+ aten.deg2rad_,
281
+ aten.detach,
282
+ aten.diag_embed,
283
+ aten.diagonal_backward,
284
+ aten.dot,
285
+ aten.vdot,
286
+ aten.elu,
287
+ aten.elu_,
288
+ aten.elu_backward,
289
+ aten._embedding_bag,
290
+ aten.embedding_dense_backward,
291
+ aten.empty_like,
292
+ aten._euclidean_dist.default,
293
+ aten.expand_as,
294
+ aten.eye,
295
+ aten.fill,
296
+ aten.fill_,
297
+ aten.floor_divide,
298
+ aten.frac,
299
+ aten.frac_,
300
+ aten._fused_moving_avg_obs_fq_helper,
301
+ aten.gelu_,
302
+ aten.gelu_backward,
303
+ aten.glu,
304
+ aten.glu_backward,
305
+ aten.hardshrink,
306
+ aten.hardsigmoid,
307
+ aten.hardsigmoid_,
308
+ aten.hardsigmoid_backward,
309
+ aten.hardswish,
310
+ aten.hardswish_,
311
+ aten.hardswish_backward,
312
+ aten.hardtanh_,
313
+ aten.hardtanh_backward,
314
+ aten.heaviside,
315
+ aten.heaviside_,
316
+ aten.huber_loss,
317
+ aten.huber_loss_backward,
318
+ aten.im2col,
319
+ aten.index_add,
320
+ aten.index_add_,
321
+ aten.index_copy,
322
+ aten.index_copy_,
323
+ aten.index_fill,
324
+ aten.index_fill_,
325
+ aten.isneginf,
326
+ aten.isposinf,
327
+ aten.l1_loss,
328
+ aten.leaky_relu_,
329
+ aten.leaky_relu_backward,
330
+ aten.lerp,
331
+ aten.lerp_,
332
+ aten.linspace,
333
+ aten.logaddexp,
334
+ aten.logaddexp2,
335
+ aten.logit,
336
+ aten.logit_,
337
+ aten.logit_backward,
338
+ aten.log_sigmoid_backward,
339
+ aten.log_sigmoid_forward,
340
+ aten._log_softmax_backward_data,
341
+ aten.logspace,
342
+ aten.logsumexp.default,
343
+ aten.masked_fill,
344
+ aten.masked_fill_,
345
+ aten.mish,
346
+ aten.mish_,
347
+ aten.mse_loss,
348
+ aten.mse_loss_backward,
349
+ aten.multi_margin_loss,
350
+ aten.multilabel_margin_loss_forward,
351
+ aten.mv,
352
+ aten.mvlgamma,
353
+ aten.mvlgamma_,
354
+ aten.nansum,
355
+ aten.nan_to_num,
356
+ aten.nan_to_num_,
357
+ aten.narrow,
358
+ aten.native_batch_norm_backward,
359
+ aten.native_dropout_backward,
360
+ aten.native_group_norm_backward,
361
+ aten.native_layer_norm_backward,
362
+ aten.new_empty,
363
+ aten.new_full,
364
+ aten.new_ones,
365
+ aten.new_zeros,
366
+ aten.nll_loss_backward,
367
+ aten.nll_loss_forward,
368
+ aten.norm,
369
+ aten.ones,
370
+ aten.ones_like,
371
+ aten._prelu_kernel,
372
+ aten._prelu_kernel_backward,
373
+ aten._reshape_alias,
374
+ aten.rad2deg,
375
+ aten.rad2deg_,
376
+ aten.renorm,
377
+ aten.renorm_,
378
+ aten.replication_pad2d,
379
+ aten.rot90,
380
+ aten.rrelu_with_noise,
381
+ aten.rrelu_with_noise_,
382
+ aten.rsub.Scalar,
383
+ aten.rsub.Tensor,
384
+ aten._scaled_dot_product_flash_attention.default,
385
+ aten.select_backward,
386
+ aten.select_scatter,
387
+ aten.sgn,
388
+ aten.sgn_,
389
+ aten.sigmoid_backward,
390
+ aten.silu,
391
+ aten.silu_,
392
+ aten.silu_backward,
393
+ aten.sinc,
394
+ aten.sinc_,
395
+ aten.slice_backward,
396
+ aten.smooth_l1_loss,
397
+ aten.smooth_l1_loss_backward,
398
+ aten.soft_margin_loss,
399
+ aten.soft_margin_loss_backward,
400
+ aten._softmax_backward_data,
401
+ aten.softplus,
402
+ aten.softplus_backward,
403
+ aten.softshrink,
404
+ aten.special_entr,
405
+ aten.special_log_ndtr,
406
+ aten.special_xlog1py,
407
+ aten.split.Tensor,
408
+ aten.squeeze.default,
409
+ aten.squeeze.dim,
410
+ aten.std,
411
+ aten.std_mean,
412
+ aten.stack,
413
+ aten.sum.default,
414
+ aten.sum.out,
415
+ aten.t,
416
+ aten.tanh_backward,
417
+ aten.threshold,
418
+ aten.threshold_,
419
+ aten.threshold_backward,
420
+ aten.trace,
421
+ aten.transpose.int,
422
+ aten.tril,
423
+ aten.tril_,
424
+ aten.triu,
425
+ aten.triu_,
426
+ aten.unbind,
427
+ aten.unfold_backward,
428
+ aten.unfold_copy,
429
+ aten._unsafe_index,
430
+ aten.unsafe_split.Tensor,
431
+ aten.unsafe_split_with_sizes,
432
+ aten._unsafe_view,
433
+ aten.upsample_bilinear2d,
434
+ aten.upsample_nearest2d_backward,
435
+ aten.view_as_complex,
436
+ aten.xlogy,
437
+ aten.xlogy_,
438
+ aten.zero,
439
+ aten.zero_,
440
+ aten.zeros,
441
+ aten.zeros_like,
442
+ aten._weight_norm_interface,
443
+ ]
444
+ )
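
For context, a minimal usage sketch of the registry API defined in torch/_decomp/__init__.py above. This snippet is not part of the commit; the private my_registry dict and the silu_decomp name are illustrative assumptions, and it presumes a PyTorch build where torch._decomp behaves as shown in the diff.

import torch
from torch._decomp import get_decompositions, register_decomposition

aten = torch.ops.aten

# Register into a private registry to avoid colliding with the decompositions
# PyTorch has already placed in the global "post_autograd" table.
my_registry = {}

@register_decomposition(aten.silu, registry=my_registry)  # packet -> all overloads
def silu_decomp(x):
    return x * torch.sigmoid(x)

# get_decompositions() expands overload packets and silently skips operators
# that have no registered decomposition.
core_subset = get_decompositions([aten.elu, aten.hardswish, aten.select_backward])
print(len(my_registry), len(core_subset))
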
env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc ADDED
Binary file (102 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_jvp.cpython-310.pyc ADDED
Binary file (6.29 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions_for_rng.cpython-310.pyc ADDED
Binary file (8.02 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions.py ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_jvp.py ADDED
@@ -0,0 +1,302 @@
1
+ import inspect
2
+ from typing import Callable, Dict, List, Optional, Tuple
3
+
4
+ import torch
5
+ import torch._decomp
6
+ from torch import Tensor
7
+ from torch._prims_common.wrappers import _maybe_remove_out_wrapper
8
+
9
+ decomposition_table = torch._decomp.decomposition_table
10
+ decomposition_table_for_jvp: Dict[torch._ops.OperatorBase, Callable] = {}
11
+ register_decomposition = torch._decomp.register_decomposition
12
+ aten = torch.ops.aten
13
+
14
+ # NOTE: [forward-mode AD decompositions mechanism]
15
+ #
16
+ # The mechanism is in VariableType,
17
+ # IF any inputs have forward grad
18
+ # AND there is no forward AD formula implemented
19
+ # AND the functions is actually differentiable
20
+ # run the decomposition
21
+ # See run_jit_decomposition_with_args_for_jvp
22
+ # We currently use python decompositions that we torchscript.
23
+ #
24
+ # Note that we would be building the backward graph at the decomposed level
25
+ # too, but that is OK, because we would've errored out otherwise anyway.
26
+ #
27
+ # TODO: The mechanism we are using to register decompositions doesn't
28
+ # seem to be exclusively used for jvp. So open question here is whether
29
+ # torch/csrc/jit/runtime/decomposition_registry.cpp is being used for other things.
30
+ # If that is the case, we may go down the decomposition path unexpectedly
31
+ # (and possibly produce an unintelligible error) vs erroring out earlier and
32
+ # printing that the forward AD formula is not implemented.
33
+ #
34
+ # The solution to this may be to have a explicitly white list control when
35
+ # to enable the decomposition.
36
+
37
+
38
+ def maybe_register_decomposition(op):
39
+ def decorator(f):
40
+ try:
41
+ return register_decomposition(op)(f)
42
+ except Exception:
43
+ return f
44
+
45
+ return decorator
46
+
47
+
48
+ # Functions where we need a special decomposition for jvp but there's another version that
49
+ # should be used more generally (ex. for jvp we need to recompute the mean and variance for
50
+ # the backwards of a normalization function. Without jvp, it should use the saved value)
51
+ decomposition_table_for_jvp = {}
52
+
53
+
54
+ def register_decomposition_for_jvp(fn):
55
+ return register_decomposition(fn, registry=decomposition_table_for_jvp)
56
+
57
+
58
+ def _register_jit_decomposition_for_jvp(decomp, use_python=False):
59
+ if decomp in decomposition_table_for_jvp:
60
+ decomposition_table_used = decomposition_table_for_jvp
61
+ elif decomp in decomposition_table:
62
+ decomposition_table_used = decomposition_table
63
+ else:
64
+ raise RuntimeError(f"could not find decomposition for {decomp}")
65
+ decomp_fn = decomposition_table_used[decomp]
66
+
67
+ # `out_wrapper` extends a decompositions signature with
68
+ # an `out` parameter. However jit will use the unwrapped function's
69
+ # signature instead so we need to unwrap here to prevent an error
70
+ decomp_fn = _maybe_remove_out_wrapper(decomp_fn)
71
+
72
+ if use_python:
73
+ decomp_fn = torch.jit.ignore(decomp_fn)
74
+ sig = inspect.signature(decomp_fn)
75
+
76
+ # Create a string wrapping the function from the signature
77
+ # example output:
78
+ # def wrapped_decomp(x: torch.Tensor, y: int, z: int):
79
+ # return decomp_fn(x, y, z)
80
+ # Thanks copilot!
81
+ def get_function_def(sig):
82
+ param_def = [f"{param_str}" for param_str in sig.parameters.values()]
83
+ param_use = [f"{param_str}" for param_str in sig.parameters.keys()]
84
+
85
+ return f"def wrapped_decomp({', '.join(param_def)}):\n return decomp_fn({', '.join(param_use)})\n"
86
+
87
+ f_str = get_function_def(sig)
88
+ graph = torch.jit.CompilationUnit(f_str).wrapped_decomp.graph
89
+ else:
90
+ graph = torch.jit.script(decomp_fn).graph
91
+ torch.jit._register_decomposition(decomp, graph)
92
+
93
+
94
+ # The only decompositions here are temporary or hacks for the purposes of jvp
95
+
96
+
97
+ # TODO: do these also belong here?
98
+ @maybe_register_decomposition(aten.trace.default)
99
+ def trace(self: Tensor) -> Tensor:
100
+ return torch.sum(torch.diag(self))
101
+
102
+
103
+ @maybe_register_decomposition(aten.log_sigmoid_forward.default)
104
+ def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
105
+ min = torch.minimum(self.new_zeros(()), self)
106
+ z = torch.exp(-torch.abs(self))
107
+ if self.is_cuda:
108
+ buffer = self.new_zeros((0,))
109
+ else:
110
+ buffer = z
111
+ return min - torch.log1p(z), buffer
112
+
113
+
114
+ def recompute_mean_var(
115
+ input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
116
+ ):
117
+ # for most norm decompositions, it will be the same as the core version except for here.
118
+ # We recompute the mean and variance so that they track gradients through input
119
+
120
+ mean = torch.mean(input, dim=inner_dim_indices, keepdim=keepdim)
121
+ var = torch.var(input, dim=inner_dim_indices, unbiased=False, keepdim=keepdim)
122
+ eps = torch.pow(1 / rstd, 2) - var # this makes me so sad inside
123
+ eps = eps.detach()
124
+ rstd = 1 / torch.sqrt(var + eps)
125
+ return mean, rstd
126
+
127
+
128
+ @register_decomposition_for_jvp(aten.native_layer_norm_backward)
129
+ def native_layer_norm_backward(
130
+ grad_out: Tensor,
131
+ input: Tensor,
132
+ normalized_shape: List[int],
133
+ mean: Tensor,
134
+ rstd: Tensor,
135
+ weight: Optional[Tensor],
136
+ bias: Optional[Tensor],
137
+ output_mask: List[bool],
138
+ ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
139
+ input_shape = input.shape
140
+ input_ndim = input.dim()
141
+
142
+ axis = input_ndim - len(normalized_shape)
143
+ inner_dims = input_shape[axis:]
144
+ outer_dims = input_shape[:axis]
145
+ inner_dim_indices = list(range(axis, input_ndim))
146
+ outer_dim_indices = list(range(0, axis))
147
+
148
+ N = 1
149
+ for i in inner_dims:
150
+ N *= i
151
+ M = 1
152
+ for i in outer_dims:
153
+ M *= i
154
+ if M <= 0 or N <= 0:
155
+ return (
156
+ input.new_zeros(input_shape),
157
+ input.new_zeros(input_shape[axis:]),
158
+ input.new_zeros(input_shape[axis:]),
159
+ )
160
+
161
+ mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True)
162
+
163
+ x_hat = (input - mean_) * rstd_
164
+ if weight is not None:
165
+ grad_x_hat = grad_out * weight
166
+ else:
167
+ grad_x_hat = grad_out
168
+ a = grad_x_hat * N
169
+ b = torch.sum(grad_x_hat, inner_dim_indices, True)
170
+ c1 = torch.mul(grad_x_hat, x_hat)
171
+ c2 = torch.sum(c1, inner_dim_indices, True)
172
+ c3 = torch.mul(x_hat, c2)
173
+ inner = a - b - c3
174
+
175
+ if output_mask[0]:
176
+ d_input: Optional[Tensor] = (rstd_ / N) * inner
177
+ else:
178
+ d_input = torch.zeros_like(input) # should be None but doesn't work with vjp
179
+
180
+ if output_mask[1] and weight is not None:
181
+ if len(outer_dim_indices) > 0:
182
+ d_weight: Optional[Tensor] = torch.sum(
183
+ grad_out * x_hat, outer_dim_indices, False
184
+ )
185
+ else:
186
+ d_weight = grad_out * x_hat
187
+ elif weight is not None:
188
+ d_weight = torch.zeros_like(weight) # should be None but doesn't work with vjp
189
+ else:
190
+ d_weight = torch.zeros(()) # should be None but doesn't work with vjp
191
+
192
+ if output_mask[2] and bias is not None:
193
+ if len(outer_dim_indices) > 0:
194
+ d_bias: Optional[Tensor] = torch.sum(grad_out, outer_dim_indices, False)
195
+ else:
196
+ d_bias = grad_out.clone()
197
+ elif bias is not None:
198
+ d_bias = torch.zeros_like(bias) # should be None but doesn't work with vjp
199
+ else:
200
+ d_bias = torch.zeros(()) # should be None but doesn't work with vjp
201
+
202
+ return (d_input, d_weight, d_bias)
203
+
204
+
205
+ def prod(x: List[int]):
206
+ r = 1
207
+ for i in x:
208
+ r *= i
209
+ return r
210
+
211
+
212
+ @register_decomposition_for_jvp(aten.native_batch_norm_backward)
213
+ def native_batch_norm_backward(
214
+ grad_out: Tensor,
215
+ input: Tensor,
216
+ weight: Optional[Tensor],
217
+ running_mean: Optional[Tensor],
218
+ running_var: Optional[Tensor],
219
+ save_mean: Optional[Tensor],
220
+ save_invstd: Optional[Tensor],
221
+ train: bool,
222
+ eps: float,
223
+ output_mask: List[bool],
224
+ ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
225
+ input_shape = input.shape
226
+ input_rank = input.dim()
227
+ assert input_rank >= 2, "rank of the input must be at least 2"
228
+
229
+ axis = 1
230
+ num_features = prod(input_shape) / input_shape[axis] # type: ignore[arg-type]
231
+ mean = save_mean
232
+ invstd = save_invstd
233
+ if train:
234
+ assert (
235
+ save_mean is not None and save_invstd is not None
236
+ ), "when train=True, save_mean and save_invstd are required"
237
+
238
+ reduciton_dims = [0] + list(range(2, input.dim()))
239
+ assert invstd is not None # for typing
240
+ mean, invstd = recompute_mean_var(input, invstd, reduciton_dims, keepdim=False)
241
+ else:
242
+ assert running_mean is not None and running_var is not None
243
+ mean = running_mean
244
+ invstd = torch.rsqrt(running_var + eps)
245
+
246
+ assert invstd is not None and mean is not None
247
+
248
+ broadcast_mask = [1] * input_rank
249
+ broadcast_mask[axis] = input_shape[axis]
250
+
251
+ reduction_axes: List[int] = []
252
+ for i in range(input_rank):
253
+ if i != axis:
254
+ reduction_axes.append(i)
255
+
256
+ mean = torch.reshape(mean, broadcast_mask)
257
+ norm = 1.0 / num_features
258
+ grad_output_sum = torch.sum(grad_out, reduction_axes)
259
+ dot_p = torch.sum(grad_out * (input - mean), reduction_axes)
260
+
261
+ grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask)
262
+ proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask)
263
+
264
+ if weight is None:
265
+ grad_scale = torch.reshape(invstd, broadcast_mask) * 1.0
266
+ else:
267
+ grad_scale = torch.reshape(invstd * weight, broadcast_mask)
268
+
269
+ if train:
270
+ proj = (input - mean) * proj_scale
271
+ grad_input = ((grad_out - proj) - grad_mean) * grad_scale
272
+ else:
273
+ grad_input = grad_out * grad_scale
274
+
275
+ if output_mask[1]:
276
+ grad_weight = dot_p * invstd
277
+ elif weight is not None:
278
+ grad_weight = torch.zeros_like(
279
+ weight
280
+ ) # should be None but doesn't work with vjp
281
+ else:
282
+ grad_weight = torch.zeros(()) # should be None but doesn't work with vjp
283
+
284
+ if output_mask[2]:
285
+ grad_bias = grad_output_sum
286
+ else:
287
+ grad_bias = torch.zeros_like(
288
+ grad_output_sum
289
+ ) # should be None but doesn't work with vjp
290
+
291
+ return (grad_input, grad_weight, grad_bias)
292
+
293
+
294
+ _register_jit_decomposition_for_jvp(torch.ops.aten.trace.default, use_python=True)
295
+ _register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss_backward.default)
296
+ _register_jit_decomposition_for_jvp(torch.ops.aten.nll_loss2d_backward.default)
297
+ _register_jit_decomposition_for_jvp(torch.ops.aten._log_softmax_backward_data.default)
298
+ _register_jit_decomposition_for_jvp(torch.ops.aten._softmax_backward_data.default)
299
+ _register_jit_decomposition_for_jvp(torch.ops.aten.log_sigmoid_forward.default)
300
+ _register_jit_decomposition_for_jvp(torch.ops.aten.native_layer_norm_backward.default)
301
+ _register_jit_decomposition_for_jvp(torch.ops.aten.native_batch_norm_backward.default)
302
+ _register_jit_decomposition_for_jvp(torch.ops.aten.cudnn_batch_norm_backward.default)
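
The recompute_mean_var() helper above exists because the rstd saved by the forward pass does not track gradients with respect to input, which forward-mode AD needs. A short sketch of that idea, not part of the commit and using only public torch ops:

import torch

x = torch.randn(4, 8, requires_grad=True)
var = x.var(dim=1, unbiased=False, keepdim=True)
rstd_saved = (1.0 / torch.sqrt(var + 1e-5)).detach()  # what the forward pass would have saved

# Recover eps from the saved rstd, then rebuild rstd from a fresh variance so the
# result is differentiable with respect to x -- the same trick recompute_mean_var() uses.
eps = (1.0 / rstd_saved) ** 2 - var.detach()
rstd_new = 1.0 / torch.sqrt(x.var(dim=1, unbiased=False, keepdim=True) + eps)

print(rstd_saved.requires_grad, rstd_new.requires_grad)  # False True
print(torch.allclose(rstd_saved, rstd_new))              # values agree numerically
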
env-llmeval/lib/python3.10/site-packages/torch/_decomp/decompositions_for_rng.py ADDED
@@ -0,0 +1,263 @@
1
+ import functools
2
+ from collections import defaultdict
3
+ from typing import Callable, Dict
4
+
5
+ import torch
6
+ import torch._decomp as decomp
7
+ from torch._decomp import get_decompositions
8
+ from torch._ops import OpOverload
9
+
10
+ aten = torch.ops.aten
11
+
12
+ rng_decompositions: Dict[str, Dict[OpOverload, Callable]] = defaultdict(dict)
13
+
14
+
15
+ def register_rng_decomposition(aten_op):
16
+ return decomp.register_decomposition(aten_op, rng_decompositions)
17
+
18
+
19
+ def throw_on_non_cuda(device):
20
+ raise RuntimeError(
21
+ f"You are trying to functionalize a {device.type} RNG operator but {device.type} does not "
22
+ f"use Philox/counter-based RNG. Therefore, functionalizing a {device.type} RNG operator is "
23
+ "not supported. We are discussing the possibility of a Philox-based RNG implementation for CPU."
24
+ )
25
+
26
+
27
+ # TODO - We have to register many more distributions here, and also higher level
28
+ # ops like dropout which have fused implementation and can hide the rand inside.
29
+ @register_rng_decomposition(aten.rand)
30
+ def rand(shape, dtype=None, layout=torch.strided, device=None, pin_memory=False):
31
+ if device and device.type != "cuda":
32
+ throw_on_non_cuda(device)
33
+ seed, offset = PhiloxStateTracker.get_state_as_tuple()
34
+ dtype = dtype or torch.float32
35
+ out, offset_jump = torch.ops.rngprims.philox_rand(
36
+ shape, seed, offset, None, device, dtype
37
+ )
38
+ PhiloxStateTracker.advance_offset(offset_jump)
39
+ return out
40
+
41
+
42
+ @register_rng_decomposition(aten.rand_like)
43
+ def rand_like(
44
+ x: torch.Tensor,
45
+ dtype=None,
46
+ layout=None,
47
+ device=None,
48
+ pin_memory=False,
49
+ memory_format=torch.preserve_format,
50
+ ):
51
+ device = device or x.device
52
+ if device.type != "cuda":
53
+ throw_on_non_cuda(device)
54
+ dtype = dtype or x.dtype
55
+ seed, offset = PhiloxStateTracker.get_state_as_tuple()
56
+ out, offset_jump = torch.ops.rngprims.philox_rand(
57
+ x.shape, seed, offset, None, device, dtype
58
+ )
59
+ PhiloxStateTracker.advance_offset(offset_jump)
60
+ return out
61
+
62
+
63
+ class PhiloxState:
64
+ """
65
+ Represents a PhiloxRngState - (seed, offset) where offset = base_offset +
66
+ relative_offset. seed and base_offset basically point to the rng state just
67
+ before tracing starts. relative offset tracks the totally consumed offset at
68
+ trace time.
69
+ """
70
+
71
+ def __init__(self):
72
+ self.reset()
73
+
74
+ def reset(self):
75
+ self.seed = torch.tensor(())
76
+ self.base_offset = torch.tensor(())
77
+ self.relative_offset = 0
78
+ self.offset_advanced_alteast_once = False
79
+
80
+ def validate_state(self):
81
+ assert self.seed.numel() != 0 and self.base_offset.numel() != 0
82
+
83
+ def advance_offset(self, consumed_offset):
84
+ self.offset_advanced_alteast_once = True
85
+ self.relative_offset = self.relative_offset + consumed_offset
86
+
87
+ def set_state(self, seed, base_offset, relative_offset=0):
88
+ self.seed = seed
89
+ self.base_offset = base_offset
90
+ self.relative_offset = relative_offset
91
+
92
+ def get_state_as_tuple(self):
93
+ self.validate_state()
94
+ return (self.seed, self.base_offset + self.relative_offset)
95
+
96
+ def get_state_as_tensor(self):
97
+ # Only needed because we override get_rng_state.
98
+ self.validate_state()
99
+ return torch.stack([self.seed, self.base_offset + self.relative_offset])
100
+
101
+ def set_state_from_tensor(self, state):
102
+ # Only needed because we override set_rng_state.
103
+ self.seed, self.base_offset = torch.unbind(state)
104
+ self.relative_offset = 0
105
+
106
+
107
+ class PhiloxStateTracker:
108
+ """
109
+ Singleton class to track the philox rng state during AOT Autograd tracing.
110
+ For each aot tracing instance, AOT Autograd resets this tracker and keeps
111
+ track of both forward and backward offsets. At runtime, we only care about
112
+ the total consumed forward and backward offsets. For dynamic shapes, these
113
+ offsets are a function of input shapes. Therefore, the AOT generated graphs
114
+ have additional outputs that compute total consumed forward and backward
115
+ offsets.
116
+ """
117
+
118
+ running_state: PhiloxState
119
+ fwd_state: PhiloxState
120
+ bwd_state: PhiloxState
121
+
122
+ def __enter__(self):
123
+ PhiloxStateTracker.reset()
124
+ return self
125
+
126
+ def __exit__(self, exc_type, exc_cal, exc_tb):
127
+ PhiloxStateTracker.reset()
128
+
129
+ @classmethod
130
+ def reset(cls):
131
+ cls.running_state = PhiloxState()
132
+ cls.fwd_state = PhiloxState()
133
+ cls.bwd_state = PhiloxState()
134
+
135
+ @classmethod
136
+ def mark_beginning_of_forward(cls):
137
+ # Tells the tracker to use fwd_state as the running state
138
+ cls.running_state = cls.fwd_state
139
+
140
+ @classmethod
141
+ def mark_beginning_of_backward(cls):
142
+ # Tells the tracker to use bwd_state as the running state
143
+ cls.running_state = cls.bwd_state
144
+
145
+ @classmethod
146
+ def record_state(cls, seed, offset, mode):
147
+ # Records the seed and offset tensors. These tensors are used to invoke
148
+ # the philox_rand functional primitives.
149
+ if mode == "forward":
150
+ cls.fwd_state.set_state(seed, offset)
151
+ cls.mark_beginning_of_forward()
152
+ else:
153
+ assert mode == "backward"
154
+ cls.bwd_state.set_state(seed, offset)
155
+
156
+ @classmethod
157
+ def get_state_as_tensor(cls):
158
+ # The only reason this exists is because we override get_rng_state and
159
+ # set_rng_state during tracing. get_rng_state expects a tensor output,
160
+ # so return (seed, offset) tuple upset other parts of the program like
161
+ # ctx.saved_tensors.
162
+
163
+ # A bad consequence is that if user saves and restores rng state, we
164
+ # have little bit of ugliness in the generated code, where we first
165
+ # concat the (seed, offset) to create a tensor for get_rng_state, and
166
+ # then split it back to get (seed, offset) tuple in set_rng_state.
167
+
168
+ # TODO: Investigate if there is be a better way to wrap the tuple in a
169
+ # false Tensor object, and then desugar it later on.
170
+ return cls.running_state.get_state_as_tensor()
171
+
172
+ @classmethod
173
+ def get_state_as_tuple(cls):
174
+ return cls.running_state.get_state_as_tuple()
175
+
176
+ @classmethod
177
+ def set_state_from_tensor(cls, x):
178
+ # This is only needed because we override set_rng_state. Look at the
179
+ # comment in get_state_from_tensor method.
180
+ cls.running_state.set_state_from_tensor(x)
181
+
182
+ @classmethod
183
+ def advance_offset(cls, consumed_offset):
184
+ cls.running_state.advance_offset(consumed_offset)
185
+
186
+ @classmethod
187
+ def get_current_relative_offset(cls):
188
+ return cls.running_state.relative_offset
189
+
190
+ @staticmethod
191
+ def multiple_of_4(offset):
192
+ # torch cuda rng state offset must be a multiple of 4. For inductor, as
193
+ # we sum up all the numel, the result might not be a multiple of 4. This
194
+ # method achieves that.
195
+ return (offset + 3) // 4 * 4
196
+
197
+ @classmethod
198
+ def get_updated_fwd_offset(cls):
199
+ # Short circuit if no rand ops were observed
200
+ if not cls.fwd_state.offset_advanced_alteast_once:
201
+ return cls.fwd_state.base_offset
202
+ return cls.multiple_of_4(
203
+ cls.fwd_state.base_offset + cls.fwd_state.relative_offset
204
+ )
205
+
206
+ @classmethod
207
+ def get_updated_bwd_offset(cls):
208
+ # Short circuit if no rand ops were observed
209
+ if not cls.bwd_state.offset_advanced_alteast_once:
210
+ return cls.bwd_state.base_offset
211
+ return cls.multiple_of_4(
212
+ cls.bwd_state.base_offset + cls.bwd_state.relative_offset
213
+ )
214
+
215
+
216
+ # Adding more decompositions which eventually use rand_like inside decomps.
217
+ # Adding these in rng_decompositions ensures the functionalization of rand_like
218
+ # ops used in these decomps. The list is copied from inductor codebase, which
219
+ # uses it for similar purpose.
220
+ #
221
+ # Caution - These decomps do not have same accuracy as that of eager. However,
222
+ # we can't just disable them with a config flag like fallback_random, because
223
+ # for functionalization of rng ops, we have to decompose these ops.
224
+ extra_random_decomps = get_decompositions(
225
+ [
226
+ aten.cauchy,
227
+ aten.cauchy_,
228
+ aten.exponential,
229
+ aten.exponential_,
230
+ aten.geometric,
231
+ aten.geometric_,
232
+ aten.native_dropout,
233
+ aten.normal,
234
+ aten.normal_,
235
+ aten.normal_functional,
236
+ aten.log_normal,
237
+ aten.log_normal_,
238
+ aten.rrelu_with_noise,
239
+ aten.rrelu_with_noise_,
240
+ aten.uniform_,
241
+ ]
242
+ )
243
+ register_extra_random_decomp = functools.partial(
244
+ decomp.register_decomposition, registry=extra_random_decomps
245
+ )
246
+
247
+
248
+ @register_extra_random_decomp([aten.bernoulli_])
249
+ def bernoulli_(self, p=0.5):
250
+ if self.device == torch.device("cpu"):
251
+ return NotImplemented
252
+ return self.copy_(torch.rand_like(self, dtype=torch.float32) < p)
253
+
254
+
255
+ @register_extra_random_decomp([aten.bernoulli.p])
256
+ def bernoulli_p(self, p=0.5, *, generator=None):
257
+ if self.device == torch.device("cpu"):
258
+ return NotImplemented
259
+ assert generator is None
260
+ return torch.rand_like(self, dtype=torch.float32) < p
261
+
262
+
263
+ rng_decompositions.update(extra_random_decomps) # type: ignore[arg-type]
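
A small sketch of the offset bookkeeping that PhiloxStateTracker performs while AOT Autograd traces RNG ops. It is not part of the commit and assumes the module above imports as torch._decomp.decompositions_for_rng on your build; no CUDA is needed because only the Python-side state object is exercised.

import torch
from torch._decomp.decompositions_for_rng import PhiloxState, PhiloxStateTracker

state = PhiloxState()
state.set_state(seed=torch.tensor(123), base_offset=torch.tensor(0))
state.advance_offset(10)  # e.g. one philox_rand call consumed 10 counters
state.advance_offset(7)
seed, offset = state.get_state_as_tuple()

print(int(offset))                           # 17 = base_offset + relative_offset
print(PhiloxStateTracker.multiple_of_4(17))  # 20: offsets are rounded up to a multiple of 4
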
env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes).
 
env-llmeval/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc ADDED
Binary file (6.67 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_dispatch/python.py ADDED
@@ -0,0 +1,178 @@
1
+ import itertools
2
+ import unittest.mock
3
+ from contextlib import contextmanager
4
+ from typing import Iterator
5
+
6
+ import torch
7
+ import torch._C
8
+ import torch._ops
9
+ import torch.utils._python_dispatch
10
+ import torch.utils._pytree as pytree
11
+
12
+ __all__ = ["enable_python_dispatcher", "no_python_dispatcher", "enable_pre_dispatch"]
13
+
14
+ no_python_dispatcher = torch._C._DisablePythonDispatcher
15
+ enable_python_dispatcher = torch._C._EnablePythonDispatcher
16
+ enable_pre_dispatch = torch._C._EnablePreDispatch
17
+
18
+ CROSSREF_FUNCTIONALIZE = False
19
+
20
+
21
+ def all_py_loaded_overloads() -> Iterator[torch._ops.OpOverload]:
22
+ """
23
+ Warning: the set of overloads this will report is very subtle. It is precisely
24
+ the set of torch.ops functions that have actually been accessed from Python
25
+ (e.g., we actually called torch.ops.aten.blah at some point. This is DIFFERENT
26
+ from the set of registered operators, which will in general be a larger set,
27
+ as this would include all operators which we ran C++ static initializers or
28
+ Python operator registration on. This does not eagerly populate the list on
29
+ torch.ops.aten; this list is lazy!
30
+
31
+ In other words, this is good for traversing over everything that has an
32
+ OpOverload object allocated in Python. We use it for cache invalidation, but
33
+ don't rely on this list being complete.
34
+
35
+ Note that even if we did report all C++ registered overloads, this isn't guaranteed
36
+ to be complete either, as a subsequent lazy load of a library which triggers more
37
+ registrations could add more things to the set.
38
+ """
39
+ for ns in torch.ops:
40
+ packets = getattr(torch.ops, ns)
41
+ for op_name in packets:
42
+ packet = getattr(packets, op_name)
43
+ for overload in packet:
44
+ yield getattr(packet, overload)
45
+
46
+
47
+ @contextmanager
48
+ def suspend_functionalization():
49
+ f_tls = torch._C._dispatch_tls_is_dispatch_key_included(
50
+ torch._C.DispatchKey.Functionalize
51
+ )
52
+ f_rv = torch._C._functionalization_reapply_views_tls()
53
+ if f_tls:
54
+ torch._disable_functionalization()
55
+ try:
56
+ yield
57
+ finally:
58
+ if f_tls:
59
+ torch._enable_functionalization(reapply_views=f_rv)
60
+
61
+
62
+ def check_tensor_metadata_matches(nv, rv, desc):
63
+ assert callable(desc)
64
+ assert nv.size() == rv.size(), f"{desc()}: sizes {nv.size()} != {rv.size()}"
65
+ assert nv.dtype == rv.dtype, f"{desc()}: dtype {nv.dtype} != {rv.dtype}"
66
+ same_strides, idx = torch._prims_common.check_significant_strides(
67
+ nv, rv, only_cuda=False
68
+ )
69
+ assert (
70
+ same_strides
71
+ ), f"{desc()}: strides {nv.stride()} != {rv.stride()} (mismatch at index {idx})"
72
+
73
+
74
+ def check_metadata_matches(n, r, desc):
75
+ assert callable(desc)
76
+ n_vals, n_spec = pytree.tree_flatten(n)
77
+ r_vals, r_spec = pytree.tree_flatten(r)
78
+ # TODO: test the specs match; empirically sometimes we have a tuple
79
+ # on one side and a list on the other
80
+ assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}"
81
+ for i, nv, rv in zip(range(len(n_vals)), n_vals, r_vals):
82
+ if not isinstance(rv, torch.Tensor):
83
+ continue
84
+ check_tensor_metadata_matches(nv, rv, lambda: f"{desc()} output {i}")
85
+
86
+
87
+ class Lit:
88
+ def __init__(self, s):
89
+ self.s = s
90
+
91
+ def __repr__(self):
92
+ return self.s
93
+
94
+
95
+ def _fmt(a: object) -> object:
96
+ if isinstance(a, torch.Tensor):
97
+ return Lit(
98
+ f"torch.empty_strided({tuple(a.size())}, {a.stride()}, dtype={a.dtype})"
99
+ )
100
+ else:
101
+ return a
102
+
103
+
104
+ def make_crossref_functionalize(op, final_key):
105
+ from torch._subclasses.fake_tensor import FakeTensorMode
106
+
107
+ # This case is pretty weird, suppress it for now
108
+ if op == torch.ops.aten.lift_fresh.default:
109
+ return final_key
110
+
111
+ def handler(*args, **kwargs):
112
+ fake_mode = FakeTensorMode()
113
+
114
+ def fakeify_defun(t):
115
+ if isinstance(t, torch.Tensor):
116
+ if torch._is_functional_tensor(t):
117
+ r = torch._from_functional_tensor(t)
118
+ # NB: This assumes that the inner tensor sizes/strides match
119
+ # the outer tensor sizes/strides. This doesn't necessarily have to
120
+ # be the case, see discussion at
121
+ # https://github.com/pytorch/pytorch/pull/87610/files/401ddeda1d769bedc88a12de332c7357b60e51a4#r1007264456
122
+ assert t.size() == r.size()
123
+ assert t.stride() == r.stride()
124
+ else:
125
+ r = t
126
+ # TODO: suppress guards
127
+ return fake_mode.from_tensor(r)
128
+ return t
129
+
130
+ def maybe_detach(t):
131
+ if isinstance(t, torch.Tensor):
132
+ return t.detach()
133
+ else:
134
+ return t
135
+
136
+ # TODO: This probably does the wrong thing if you're running other
137
+ # substantive modes with the normal op outside here
138
+ with torch.utils._python_dispatch._disable_current_modes(), suspend_functionalization():
139
+ f_args, f_kwargs = pytree.tree_map(fakeify_defun, (args, kwargs))
140
+ orig_f_args, orig_f_kwargs = pytree.tree_map(
141
+ maybe_detach, (f_args, f_kwargs)
142
+ )
143
+ with fake_mode:
144
+ f_r = op(*f_args, **f_kwargs)
145
+ r = op._op_dk(final_key, *args, **kwargs)
146
+
147
+ def desc():
148
+ fmt_args = ", ".join(
149
+ itertools.chain(
150
+ (repr(pytree.tree_map(_fmt, a)) for a in orig_f_args),
151
+ (
152
+ f"{k}={pytree.tree_map(_fmt, v)}"
153
+ for k, v in orig_f_kwargs.items()
154
+ ),
155
+ )
156
+ )
157
+ return f"{op}({fmt_args})"
158
+
159
+ check_metadata_matches(f_r, r, desc)
160
+ return r
161
+
162
+ return handler
163
+
164
+
165
+ # NB: enabling this is slow, don't do it in a hot loop. This is purely
166
+ # for debugging purposes.
167
+ @contextmanager
168
+ def enable_crossref_functionalize():
169
+ for op in all_py_loaded_overloads():
170
+ op._uncache_dispatch(torch._C.DispatchKey.Functionalize)
171
+ try:
172
+ with enable_python_dispatcher(), unittest.mock.patch(
173
+ "torch._dispatch.python.CROSSREF_FUNCTIONALIZE", True
174
+ ):
175
+ yield
176
+ finally:
177
+ for op in all_py_loaded_overloads():
178
+ op._uncache_dispatch(torch._C.DispatchKey.Functionalize)
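
The three names exported at the top of this file are thin aliases for torch._C RAII guards and are used as ordinary context managers, as enable_crossref_functionalize() above already demonstrates. A minimal sketch, not part of the commit:

import torch
from torch._dispatch.python import enable_python_dispatcher, no_python_dispatcher

x = torch.randn(3)

with enable_python_dispatcher():
    y = torch.ops.aten.add.Tensor(x, x)  # eligible for Python-side dispatch

with no_python_dispatcher():
    z = torch.ops.aten.mul.Tensor(x, x)  # Python dispatcher explicitly disabled

print(y.shape, z.shape)
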
env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh ADDED
@@ -0,0 +1,3 @@
+ #pragma once
+ // TODO: Remove once torchvision has been updated to use the ATen header
+ #include <ATen/cuda/Atomic.cuh>
env-llmeval/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh ADDED
@@ -0,0 +1,3 @@
+ #pragma once
+ // TODO: Remove this header
+ #include <ATen/cuda/DeviceUtils.cuh>
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h ADDED
@@ -0,0 +1,57 @@
+ #pragma once
+
+ #include <cstring>
+ #include <mutex>
+ #include <unordered_map>
+
+ #include <c10/core/Allocator.h>
+ #include <c10/util/Flags.h>
+
+ // TODO: rename to c10
+ C10_DECLARE_bool(caffe2_report_cpu_memory_usage);
+
+ namespace c10 {
+
+ using MemoryDeleter = void (*)(void*);
+
+ // A helper function that is basically doing nothing.
+ C10_API void NoDelete(void*);
+
+ // A simple struct that is used to report C10's memory allocation,
+ // deallocation status and out-of-memory events to the profiler
+ class C10_API ProfiledCPUMemoryReporter {
+  public:
+   ProfiledCPUMemoryReporter() = default;
+   void New(void* ptr, size_t nbytes);
+   void OutOfMemory(size_t nbytes);
+   void Delete(void* ptr);
+
+  private:
+   std::mutex mutex_;
+   std::unordered_map<void*, size_t> size_table_;
+   size_t allocated_ = 0;
+   size_t log_cnt_ = 0;
+ };
+
+ C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter();
+
+ // Get the CPU Allocator.
+ C10_API at::Allocator* GetCPUAllocator();
+ // Sets the CPU allocator to the given allocator: the caller gives away the
+ // ownership of the pointer.
+ C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0);
+
+ // Get the Default CPU Allocator
+ C10_API at::Allocator* GetDefaultCPUAllocator();
+
+ // Get the Default Mobile CPU Allocator
+ C10_API at::Allocator* GetDefaultMobileCPUAllocator();
+
+ // The CPUCachingAllocator is experimental and might disappear in the future.
+ // The only place that uses it is in StaticRuntime.
+ // Set the CPU Caching Allocator
+ C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0);
+ // Get the CPU Caching Allocator
+ C10_API Allocator* GetCPUCachingAllocator();
+
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ #include <c10/core/Device.h>
+
+ namespace c10 {
+
+ using CopyBytesFunction = void (*)(
+     size_t nbytes,
+     const void* src,
+     Device src_device,
+     void* dst,
+     Device dst_device);
+
+ struct C10_API _CopyBytesFunctionRegisterer {
+   _CopyBytesFunctionRegisterer(
+       DeviceType from,
+       DeviceType to,
+       CopyBytesFunction func_sync,
+       CopyBytesFunction func_async = nullptr);
+ };
+
+ #define REGISTER_COPY_BYTES_FUNCTION(from, to, ...)           \
+   namespace {                                                 \
+   static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \
+       g_copy_function)(from, to, __VA_ARGS__);                \
+   }
+
+ /*
+  * WARNING: Implementations for this function are currently registered from
+  * ATen and caffe2, not yet from c10. Don't use this if not either ATen
+  * or caffe2 is present as well.
+  * We can't move them yet, because the CUDA implementations aren't unified yet
+  * between ATen and caffe2.
+  * We're planning to move the implementations into c10/backend/xxx
+  * to make c10 self contained again.
+  */
+ C10_API void CopyBytes(
+     size_t nbytes,
+     const void* src,
+     Device src_device,
+     void* dst,
+     Device dst_device,
+     bool async);
+ } // namespace c10
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h ADDED
@@ -0,0 +1,195 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/InlineDeviceGuard.h>
4
+
5
+ namespace c10 {
6
+
7
+ /// RAII guard that sets a certain default device in its constructor, and
8
+ /// changes it back to the device that was originally active upon destruction.
9
+ ///
10
+ /// The device is always reset to the one that was active at the time of
11
+ /// construction of the guard. Even if you `set_device` after construction, the
12
+ /// destructor will still reset the device to the one that was active at
13
+ /// construction time.
14
+ ///
15
+ /// This device guard does NOT have an uninitialized state; it is guaranteed
16
+ /// to reset a device on exit. If you are in a situation where you *might*
17
+ /// want to setup a guard (i.e., are looking for the moral equivalent
18
+ /// of optional<DeviceGuard>), see OptionalDeviceGuard.
19
+ class DeviceGuard {
20
+ public:
21
+ /// No default constructor; see Note [Omitted default constructor from RAII]
22
+ explicit DeviceGuard() = delete;
23
+
24
+ /// Set the current device to the passed Device.
25
+ explicit DeviceGuard(Device device) : guard_(device) {}
26
+
27
+ /// This constructor is for testing only.
28
+ explicit DeviceGuard(
29
+ Device device,
30
+ const impl::DeviceGuardImplInterface* impl)
31
+ : guard_(device, impl) {}
32
+
33
+ /// Copy is disallowed
34
+ DeviceGuard(const DeviceGuard&) = delete;
35
+ DeviceGuard& operator=(const DeviceGuard&) = delete;
36
+
37
+ /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
38
+ /// which is required for moves on types with nontrivial destructors.
39
+ DeviceGuard(DeviceGuard&& other) = delete;
40
+ DeviceGuard& operator=(DeviceGuard&& other) = delete;
41
+
42
+ /// Sets the device to the given one. The specified device must be consistent
43
+ /// with the device type originally specified during guard construction.
44
+ ///
45
+ /// TODO: The consistency check here is inconsistent with StreamGuard's
46
+ /// behavior with set_stream, where a stream on a different device than
47
+ /// the original one isn't an error; we just reset the stream and then
48
+ /// switch devices.
49
+ void reset_device(at::Device device) {
50
+ guard_.reset_device(device);
51
+ }
52
+
53
+ /// This method is for testing only.
54
+ void reset_device(
55
+ at::Device device,
56
+ const impl::DeviceGuardImplInterface* impl) {
57
+ guard_.reset_device(device, impl);
58
+ }
59
+
60
+ /// Sets the device index to the given one. The device type is inferred
61
+ /// from the original device type the guard was constructed with.
62
+ void set_index(DeviceIndex index) {
63
+ guard_.set_index(index);
64
+ }
65
+
66
+ /// Returns the device that was set at the time the guard was constructed.
67
+ Device original_device() const {
68
+ return guard_.original_device();
69
+ }
70
+
71
+ /// Returns the most recent device that was set using this device guard,
72
+ /// either from construction, or via set_device.
73
+ Device current_device() const {
74
+ return guard_.current_device();
75
+ }
76
+
77
+ private:
78
+ impl::InlineDeviceGuard<impl::VirtualGuardImpl> guard_;
79
+ };
80
+
81
+ /**
82
+ * A OptionalDeviceGuard is an RAII class that sets a device to some value on
83
+ * initialization, and resets the device to its original value on destruction.
84
+ * Morally, a OptionalDeviceGuard is equivalent to optional<DeviceGuard>, but
85
+ * with extra constructors and methods as appropriate.
86
+ *
87
+ * Besides its obvious use (optionally applying a DeviceGuard),
88
+ * OptionalDeviceGuard is often also used for the following idiom:
89
+ *
90
+ * OptionalDeviceGuard g;
91
+ * for (const auto& t : tensors) {
92
+ * g.set_device(t.device());
93
+ * do_something_with(t);
94
+ * }
95
+ *
96
+ * This usage is marginally more efficient than constructing a DeviceGuard every
97
+ * iteration of the for loop, as it avoids an unnecessary device reset.
98
+ *
99
+ * Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs
100
+ * when you use the nullary constructor, or pass a nullopt to the constructor.
101
+ * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the
102
+ * original device was and they do not reset on destruction. This is why
103
+ * original_device() and current_device() return optional<Device> rather than
104
+ * Device (as they do in DeviceGuard), and also is why we didn't just
105
+ * provide OptionalDeviceGuard by default and hide DeviceGuard from users.
106
+ *
107
+ * The semantics of an OptionalDeviceGuard are exactly explained by thinking
108
+ * of it as an optional<DeviceGuard>. In particular, an initialized
109
+ * OptionalDeviceGuard doesn't restore device to its value at construction; it
110
+ * restores device to its value *at initialization*. So if you have the
111
+ * program:
112
+ *
113
+ * setDevice(1);
114
+ * OptionalDeviceGuard g;
115
+ * setDevice(2);
116
+ * g.reset_device(Device(DeviceType::CUDA, 3)); // initializes!
117
+ *
118
+ * On destruction, g will reset device to 2, rather than 1.
119
+ *
120
+ * An uninitialized OptionalDeviceGuard is distinct from an (initialized)
121
+ * DeviceGuard whose original_device_ and current_device_ match, since the
122
+ * DeviceGuard will still reset the device to original_device_.
123
+ */
124
+ class OptionalDeviceGuard {
125
+ public:
126
+ /// Create an uninitialized guard. Set the guard later using reset_device.
127
+ explicit OptionalDeviceGuard() = default;
128
+
129
+ /// Initialize the guard, setting the current device to the passed Device.
130
+ explicit OptionalDeviceGuard(Device device) : guard_(device) {}
131
+
132
+ /// Initialize the guard if a Device is passed; otherwise leave the
133
+ /// guard uninitialized.
134
+ explicit OptionalDeviceGuard(optional<Device> device) : guard_(device) {}
135
+
136
+ /// Constructor for testing only.
137
+ explicit OptionalDeviceGuard(
138
+ Device device,
139
+ const impl::DeviceGuardImplInterface* impl)
140
+ : guard_(device, impl) {}
141
+
142
+ /// Copy is disallowed
143
+ OptionalDeviceGuard(const OptionalDeviceGuard&) = delete;
144
+ OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete;
145
+
146
+ /// Move is disallowed
147
+ /// See Note [Explicit initialization of optional fields]
148
+ /// and Note [Move construction for RAII guards is tricky]
149
+ /// for rationale.
150
+ OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete;
151
+ OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete;
152
+
153
+ /// Sets the device to the given one. The specified device must be consistent
154
+ /// with the device type originally specified during guard construction.
155
+ void reset_device(at::Device device) {
156
+ guard_.reset_device(device);
157
+ }
158
+
159
+ /// For testing only
160
+ void reset_device(
161
+ at::Device device,
162
+ const impl::DeviceGuardImplInterface* impl) {
163
+ guard_.reset_device(device, impl);
164
+ }
165
+
166
+ /// Returns the device that was set at the time the guard was constructed.
167
+ optional<Device> original_device() const {
168
+ return guard_.original_device();
169
+ }
170
+
171
+ /// Returns the most recent device that was set using this device guard,
172
+ /// either from construction, or via reset_device.
173
+ optional<Device> current_device() const {
174
+ return guard_.current_device();
175
+ }
176
+
177
+ private:
178
+ impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl> guard_{};
179
+ };
180
+
181
+ // Note [Whither the DeviceGuard boilerplate]
182
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
183
+ // Design note: in principle, we could avoid these wrappers using:
184
+ //
185
+ // using DeviceGuard = impl::InlineDeviceGuard<impl::VirtualGuardImpl>;
186
+ // using OptionalDeviceGuard =
187
+ // impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl>;
188
+ //
189
+ // But the error messages are worse, and our users can't just look at the
190
+ // header file to find out what's going on. Furthermore, for specializations
191
+ // like CUDAStreamGuard, it can be profitable to replace some interfaces with
192
+ // refined types (e.g., return CUDAStream instead of Stream). So, we eat
193
+ // the boilerplate and write out the API explicitly.
194
+
195
+ } // namespace c10
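A small sketch of the two guards declared above; the CUDA device indices are assumptions for illustration only:

#include <c10/core/DeviceGuard.h>
#include <c10/util/Optional.h>

void run_on_device_1() {
  c10::DeviceGuard guard(c10::Device(c10::DeviceType::CUDA, 1));
  // ... launch work on device 1 ...
  guard.set_index(0);  // switch to device 0 of the same device type
}  // the device active before construction is restored here

void maybe_guard(c10::optional<c10::Device> device) {
  c10::OptionalDeviceGuard guard(device);  // stays uninitialized if nullopt
  // ... work runs on `device` when one was provided ...
}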
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h ADDED
@@ -0,0 +1,117 @@
1
+ #pragma once
2
+
3
+ // This is directly synchronized with caffe2/proto/caffe2.proto, but
4
+ // doesn't require me to figure out how to get Protobuf headers into
5
+ // ATen/core (which would require a lot more build system hacking.)
6
+ // If you modify me, keep me synchronized with that file.
7
+
8
+ #include <c10/macros/Export.h>
9
+
10
+ #include <functional>
11
+ #include <ostream>
12
+
13
+ namespace c10 {
14
+
15
+ // These contains all device types that also have a BackendComponent
16
+ // and therefore participate in per-backend functionality dispatch keys.
17
+ // This is most backends except PrivateUse2 and PrivateUse3
18
+ #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \
19
+ _(CPU, extra) \
20
+ _(CUDA, extra) \
21
+ _(HIP, extra) \
22
+ _(XLA, extra) \
23
+ _(MPS, extra) \
24
+ _(IPU, extra) \
25
+ _(XPU, extra) \
26
+ _(HPU, extra) \
27
+ _(VE, extra) \
28
+ _(Lazy, extra) \
29
+ _(Meta, extra) \
30
+ _(MTIA, extra) \
31
+ _(PrivateUse1, extra)
32
+
33
+ enum class DeviceType : int8_t {
34
+ CPU = 0,
35
+ CUDA = 1, // CUDA.
36
+ MKLDNN = 2, // Reserved for explicit MKLDNN
37
+ OPENGL = 3, // OpenGL
38
+ OPENCL = 4, // OpenCL
39
+ IDEEP = 5, // IDEEP.
40
+ HIP = 6, // AMD HIP
41
+ FPGA = 7, // FPGA
42
+ ORT = 8, // ONNX Runtime / Microsoft
43
+ XLA = 9, // XLA / TPU
44
+ Vulkan = 10, // Vulkan
45
+ Metal = 11, // Metal
46
+ XPU = 12, // XPU
47
+ MPS = 13, // MPS
48
+ Meta = 14, // Meta (tensors with no data)
49
+ HPU = 15, // HPU / HABANA
50
+ VE = 16, // SX-Aurora / NEC
51
+ Lazy = 17, // Lazy Tensors
52
+ IPU = 18, // Graphcore IPU
53
+ MTIA = 19, // Meta training and inference devices
54
+ PrivateUse1 = 20, // PrivateUse1 device
55
+ // NB: If you add more devices:
56
+ // - Change the implementations of DeviceTypeName and isValidDeviceType
57
+ // in DeviceType.cpp
58
+ // - Change the number below
59
+ COMPILE_TIME_MAX_DEVICE_TYPES = 21,
60
+ };
61
+
62
+ constexpr DeviceType kCPU = DeviceType::CPU;
63
+ constexpr DeviceType kCUDA = DeviceType::CUDA;
64
+ constexpr DeviceType kHIP = DeviceType::HIP;
65
+ constexpr DeviceType kFPGA = DeviceType::FPGA;
66
+ constexpr DeviceType kORT = DeviceType::ORT;
67
+ constexpr DeviceType kXLA = DeviceType::XLA;
68
+ constexpr DeviceType kMPS = DeviceType::MPS;
69
+ constexpr DeviceType kMeta = DeviceType::Meta;
70
+ constexpr DeviceType kVulkan = DeviceType::Vulkan;
71
+ constexpr DeviceType kMetal = DeviceType::Metal;
72
+ constexpr DeviceType kXPU = DeviceType::XPU;
73
+ constexpr DeviceType kHPU = DeviceType::HPU;
74
+ constexpr DeviceType kVE = DeviceType::VE;
75
+ constexpr DeviceType kLazy = DeviceType::Lazy;
76
+ constexpr DeviceType kIPU = DeviceType::IPU;
77
+ constexpr DeviceType kMTIA = DeviceType::MTIA;
78
+ constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1;
79
+
80
+ // define explicit int constant
81
+ constexpr int COMPILE_TIME_MAX_DEVICE_TYPES =
82
+ static_cast<int>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES);
83
+
84
+ static_assert(
85
+ COMPILE_TIME_MAX_DEVICE_TYPES <= 21,
86
+ "Hey! You seem to be adding a lot of new DeviceTypes. The intent was "
87
+ "for this constant to reflect the actual number of DeviceTypes we support "
88
+ "in PyTorch; it's important that this number is not too large as we "
89
+ "use this to allocate stack arrays in some places in our code. If you "
90
+ "are indeed just adding the 20th device type, feel free to change "
91
+ "the check to 32; but if you are adding some sort of extensible device "
92
+ "types registration, please be aware that you are affecting code that "
93
+ "this number is small. Try auditing uses of this constant.");
94
+
95
+ C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false);
96
+
97
+ C10_API bool isValidDeviceType(DeviceType d);
98
+
99
+ C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type);
100
+
101
+ C10_API void register_privateuse1_backend(const std::string& backend_name);
102
+ C10_API std::string get_privateuse1_backend(bool lower_case = true);
103
+
104
+ } // namespace c10
105
+
106
+ namespace std {
107
+ template <>
108
+ struct hash<c10::DeviceType> {
109
+ std::size_t operator()(c10::DeviceType k) const {
110
+ return std::hash<int>()(static_cast<int>(k));
111
+ }
112
+ };
113
+ } // namespace std
114
+
115
+ namespace torch {
116
+ using c10::DeviceType;
117
+ }
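A short sketch exercising the helpers declared above (streaming, naming, and hashing a DeviceType):

#include <c10/core/DeviceType.h>
#include <iostream>
#include <unordered_set>

void device_type_example() {
  c10::DeviceType d = c10::kCUDA;
  std::cout << d << "\n";  // via the declared operator<<
  std::cout << c10::DeviceTypeName(d, /*lower_case=*/true) << "\n";
  // The std::hash specialization makes DeviceType usable as a hashed key.
  std::unordered_set<c10::DeviceType> seen{c10::kCPU, c10::kCUDA};
}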
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/Event.h ADDED
@@ -0,0 +1,121 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/InlineEvent.h>
4
+ #include <c10/core/impl/VirtualGuardImpl.h>
5
+
6
+ namespace c10 {
7
+
8
+ /**
9
+ * A backend-generic movable, not copyable, not thread-safe event.
10
+ *
11
+ * The design of this event follows that of CUDA and HIP events. These events
12
+ * are recorded and waited on by streams and can be rerecorded to,
13
+ * each rerecording essentially creating a new version of the event.
14
+ * For example, if (in CPU time), stream X is asked to record E,
15
+ * stream Y waits on E, and stream X is asked to record E again, then Y will
16
+ * wait for X to finish the first call to record and not the second, because
17
+ * it's waiting on the first version of event E, not the second.
18
+ * Querying an event only returns the status of its most recent version.
19
+ *
20
+ * Backend-generic events are implemented by this class and
21
+ * impl::InlineEvent. In addition to these events there are also
22
+ * some backend-specific events, like ATen's CUDAEvent. Each of these
23
+ * classes has its own use.
24
+ *
25
+ * impl::InlineEvent<...> or a backend-specific event should be
26
+ * preferred when the backend is known at compile time and known to
27
+ * be compiled. Backend-specific events may have additional functionality.
28
+ *
29
+ * This Event should be used if a particular backend may not be available,
30
+ * or the backend required is not known at compile time.
31
+ *
32
+ * These generic events are built on top of DeviceGuardImpls, analogous
33
+ * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls,"
34
+ * is no longer entirely accurate, as these classes implement the
35
+ * backend-specific logic for a generic backend interface.
36
+ *
37
+ * See DeviceGuardImplInterface.h for a list of all supported flags.
38
+ */
39
+
40
+ struct Event final {
41
+ // Constructors
42
+ Event() = delete;
43
+ Event(
44
+ const DeviceType _device_type,
45
+ const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
46
+ : impl_{_device_type, _flag} {}
47
+
48
+ // Copy constructor and copy assignment operator (deleted)
49
+ Event(const Event&) = delete;
50
+ Event& operator=(const Event&) = delete;
51
+
52
+ // Move constructor and move assignment operator
53
+ Event(Event&&) noexcept = default;
54
+ Event& operator=(Event&&) noexcept = default;
55
+
56
+ // Destructor
57
+ ~Event() = default;
58
+
59
+ // Getters
60
+ Device device() const noexcept {
61
+ return Device(device_type(), device_index());
62
+ }
63
+ DeviceType device_type() const noexcept {
64
+ return impl_.device_type();
65
+ }
66
+ DeviceIndex device_index() const noexcept {
67
+ return impl_.device_index();
68
+ }
69
+ EventFlag flag() const noexcept {
70
+ return impl_.flag();
71
+ }
72
+ bool was_marked_for_recording() const noexcept {
73
+ return impl_.was_marked_for_recording();
74
+ }
75
+
76
+ /**
77
+ * Calls record() if and only if record() has never been called for this
78
+ * event. Note: because Event is not thread-safe recordOnce() may call
79
+ * record() multiple times if called from multiple threads.
80
+ */
81
+ void recordOnce(const Stream& stream) {
82
+ impl_.recordOnce(stream);
83
+ }
84
+
85
+ /**
86
+ * Increments the event's version and enqueues a job with this version
87
+ * in the stream's work queue. When the stream processes that job,
88
+ * it notifies all streams waiting on / blocked by that version of the
89
+ * event to continue and marks that version as recorded.
90
+ * */
91
+ void record(const Stream& stream) {
92
+ impl_.record(stream);
93
+ }
94
+
95
+ /**
96
+ * Does nothing if the event has not been scheduled to be recorded.
97
+ * If the event was previously enqueued to be recorded, a command
98
+ * to wait for the version of the event that exists at the time of this call
99
+ * is inserted in the stream's work queue.
100
+ * When the stream reaches this command it will stop processing
101
+ * additional commands until that version of the event is marked as recorded.
102
+ */
103
+ void block(const Stream& stream) const {
104
+ impl_.block(stream);
105
+ }
106
+
107
+ /**
108
+ * Returns true if (and only if)
109
+ * (1) the event has never been scheduled to be recorded
110
+ * or (2) the current version is marked as recorded.
111
+ * Returns false otherwise.
112
+ */
113
+ bool query() const {
114
+ return impl_.query();
115
+ }
116
+
117
+ private:
118
+ impl::InlineEvent<impl::VirtualGuardImpl> impl_;
119
+ };
120
+
121
+ } // namespace c10
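A hedged sketch of the record/block pattern the comments above describe; it assumes a CUDA-enabled build and uses c10::cuda stream helpers purely for illustration:

#include <c10/core/Event.h>
#include <c10/cuda/CUDAStream.h>

void event_example() {
  c10::Stream producer = c10::cuda::getCurrentCUDAStream();
  c10::Stream consumer = c10::cuda::getStreamFromPool();
  c10::Event ev(c10::DeviceType::CUDA);
  ev.record(producer);     // creates version 1 of the event
  ev.block(consumer);      // consumer waits for version 1 before proceeding
  bool done = ev.query();  // true once version 1 has completed
  (void)done;
}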
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h ADDED
@@ -0,0 +1,84 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/AutogradState.h>
4
+ #include <c10/core/impl/LocalDispatchKeySet.h>
5
+ #include <c10/macros/Export.h>
6
+
7
+ namespace c10 {
8
+
9
+ // A RAII, thread local (!) guard that enables or disables inference mode upon
10
+ // construction, and sets it back to the original value upon destruction.
11
+ struct C10_API InferenceMode {
12
+ // Note [Expected TLS state in InferenceMode]:
13
+ // InferenceMode: ADInplaceOrView not in
14
+ // raw_local_dispatch_key_set.included(),
15
+ // Autograd in raw_local_dispatch_key_set.excluded()
16
+ // GradMode is disabled.
17
+ // NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(),
18
+ // Autograd not in raw_local_dispatch_key_set.excluded()
19
+ // GradMode is enabled by default unless toggled manually
20
+ // through other APIs, e.g. NoGradGuard.
21
+ //
22
+ // Invariant:
23
+ // - ADInplaceOrView is never in the excluded set
24
+ // - Autograd is never in the included set
25
+ // - Setting InferenceMode will set GradMode accordingly, but not vice versa.
26
+ //
27
+ // 1. Why do we put ADInplaceOrView in included set outside InferenceMode?
28
+ //
29
+ // Inplace update to inference tensor outside InferenceMode is not
30
+ // allowed. See Note [Inplace update inference tensor] for more details.
31
+ // Without going through ADInplaceOrView kernel, we cannot throw error
32
+ // for `inference_tensor.add_(1)` case.
33
+ //
34
+ // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode?
35
+ //
36
+ // For example:
37
+ // torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true);
38
+ // torch::Tensor k = a + 2;
39
+ // {
40
+ // c10::InferenceMode guard(true);
41
+ // k.add_(2);
42
+ // }
43
+ // `k.add_(2)` still need to go through ADInplaceOrView kernel so that it's
44
+ // prepared for future autograd.
45
+ //
46
+ // 3. Why does setting InferenceMode also set GradMode?
47
+ //
48
+ // This is required since InferenceMode is a faster and more restrictive
49
+ // version of NoGradGuard. All runtime checks using GradMode::is_enabled()
50
+ // are applicable to InferenceMode as well, e.g.
51
+ // `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
52
+ InferenceMode(bool enabled = true)
53
+ : prev_mode(AutogradState::get_tls_state()),
54
+ prev_keyset(c10::impl::tls_local_dispatch_key_set()) {
55
+ // Enabling inference mode means disabling grad modes
56
+ // And disabling inference mode means enabling grad modes
57
+ AutogradState::set_tls_state(AutogradState(
58
+ /* grad_mode */ !enabled,
59
+ /* inference_mode */ enabled,
60
+ /* fw_grad_mode */ !enabled,
61
+ /* multithreading_enabled*/ !enabled));
62
+ DispatchKeySet included = enabled
63
+ ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView)
64
+ : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView);
65
+ DispatchKeySet excluded = enabled
66
+ ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset)
67
+ : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset);
68
+ c10::impl::PODLocalDispatchKeySet cur_keyset{};
69
+ cur_keyset.set_included(included);
70
+ cur_keyset.set_excluded(excluded);
71
+ c10::impl::_force_tls_local_dispatch_key_set(cur_keyset);
72
+ }
73
+
74
+ ~InferenceMode() {
75
+ AutogradState::set_tls_state(prev_mode);
76
+ c10::impl::_force_tls_local_dispatch_key_set(prev_keyset);
77
+ }
78
+ static bool is_enabled();
79
+
80
+ private:
81
+ AutogradState prev_mode;
82
+ c10::impl::LocalDispatchKeySet prev_keyset;
83
+ };
84
+ } // namespace c10
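The guard above is typically used as a scoped RAII object; a minimal sketch, assuming ATen is available (the matmul is only an example workload):

#include <ATen/ATen.h>
#include <c10/core/InferenceMode.h>

at::Tensor infer(const at::Tensor& input, const at::Tensor& weight) {
  c10::InferenceMode guard;          // autograd tracking is off in this scope
  return at::matmul(input, weight);  // result is an inference tensor
}                                    // previous TLS state is restored here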
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h ADDED
@@ -0,0 +1,75 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/python_stub.h>
6
+
7
+ #include <atomic>
8
+
9
+ namespace c10 {
10
+
11
+ // A PyHandleCache represents a cached pointer from a C++ object to
12
+ // a Python object that represents that object analogously in Python.
13
+ // Upon a cache hit, the relevant object can be retrieved after a test
14
+ // and then a memory load. Two conditions must hold to be able to use this
15
+ // class:
16
+ //
17
+ // - This must truly be a cache; e.g., the caller must be able to produce
18
+ // the object some other way if the cache hit misses.
19
+ //
20
+ // - This must truly be a handle; e.g., the Python object referenced by
21
+ // this class must have static lifetime. This means we don't have to
22
+ // maintain strong ownership or deallocate the object when the C++ object
23
+ // dies. Static lifetime is a good idea in conjunction with the cache,
24
+ // since if you are producing a fresh object on miss you won't be
25
+ // maintaining object identity. If you need bidirectional ownership,
26
+ // you will want to factor out the pattern in TensorImpl with
27
+ // resurrection.
28
+ //
29
+ // This cache is expected to not improve perf under torchdeploy, as one
30
+ // interpreter will fill up the cache, and all the interpreters will be
31
+ // unable to use the slot. A potential improvement is to have multiple
32
+ // slots (one per interpreter), which will work in deployment scenarios
33
+ // where there a stable, fixed number of interpreters. You can also store
34
+ // the relevant state in the Python library, rather than in the non-Python
35
+ // library (although in many cases, this is not convenient, as there may
36
+ // not be a way to conveniently index based on the object.)
37
+ class PyHandleCache {
38
+ public:
39
+ PyHandleCache() : pyinterpreter_(nullptr), data_(nullptr) {}
40
+
41
+ // Attempt to fetch the pointer from the cache, if the PyInterpreter
42
+ // matches. If it doesn't exist, or the cache entry is not valid,
43
+ // use slow_accessor to get the real pointer value and return that
44
+ // (possibly writing it to the cache, if the cache entry is
45
+ // available.)
46
+ template <typename F>
47
+ PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor)
48
+ const {
49
+ // Note [Memory ordering on Python interpreter tag]
50
+ impl::PyInterpreter* interpreter =
51
+ pyinterpreter_.load(std::memory_order_acquire);
52
+ if (C10_LIKELY(interpreter == self_interpreter)) {
53
+ return data_;
54
+ } else if (interpreter == nullptr) {
55
+ auto* r = slow_accessor();
56
+ impl::PyInterpreter* expected = nullptr;
57
+ // attempt to claim this cache entry with the specified interpreter tag
58
+ if (pyinterpreter_.compare_exchange_strong(
59
+ expected, self_interpreter, std::memory_order_acq_rel)) {
60
+ data_ = r;
61
+ }
62
+ // This shouldn't be possible, as you should be GIL protected
63
+ TORCH_INTERNAL_ASSERT(expected != self_interpreter);
64
+ return r;
65
+ } else {
66
+ return slow_accessor();
67
+ }
68
+ }
69
+
70
+ private:
71
+ mutable std::atomic<impl::PyInterpreter*> pyinterpreter_;
72
+ mutable PyObject* data_;
73
+ };
74
+
75
+ } // namespace c10
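A sketch of the ptr_or pattern described above; `interp` and `make_py_object` are stand-ins assumed to be supplied by the caller, not APIs from this header:

#include <c10/core/PyHandleCache.h>

PyObject* cached_lookup(
    const c10::PyHandleCache& cache,
    c10::impl::PyInterpreter* interp,  // assumed: the current interpreter tag
    PyObject* (*make_py_object)()) {   // assumed: the caller's slow path
  // Returns the cached pointer on a hit, otherwise runs the slow path
  // (and possibly claims the cache slot for this interpreter).
  return cache.ptr_or(interp, [&]() { return make_py_object(); });
}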
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h ADDED
@@ -0,0 +1,46 @@
 
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/core/DispatchKey.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ namespace c10 {
8
+
9
+ /**
10
+ * QEngine is an enum that is used to select the engine to run quantized ops.
11
+ * Keep this enum in sync with get_qengine_id() in
12
+ * torch/backends/quantized/__init__.py
13
+ */
14
+ enum class QEngine : uint8_t {
15
+ NoQEngine = 0,
16
+ FBGEMM = 1,
17
+ QNNPACK = 2,
18
+ ONEDNN = 3,
19
+ X86 = 4,
20
+ };
21
+
22
+ constexpr auto kNoQEngine = QEngine::NoQEngine;
23
+ constexpr auto kFBGEMM = QEngine::FBGEMM;
24
+ constexpr auto kQNNPACK = QEngine::QNNPACK;
25
+ constexpr auto kONEDNN = QEngine::ONEDNN;
26
+ constexpr auto kX86 = QEngine::X86;
27
+
28
+ inline std::string toString(QEngine qengine) {
29
+ switch (qengine) {
30
+ case kNoQEngine:
31
+ return "NoQEngine";
32
+ case kFBGEMM:
33
+ return "FBGEMM";
34
+ case kQNNPACK:
35
+ return "QNNPACK";
36
+ case kONEDNN:
37
+ return "ONEDNN";
38
+ case kX86:
39
+ return "X86";
40
+ default:
41
+ TORCH_CHECK(
42
+ false, "Unrecognized Quantized Engine: ", static_cast<int>(qengine));
43
+ }
44
+ }
45
+
46
+ } // namespace c10
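A tiny sketch of the enum and toString helper declared above; which engines are actually usable depends on the build:

#include <c10/core/QEngine.h>
#include <iostream>

void qengine_example() {
  c10::QEngine engine = c10::kFBGEMM;          // availability is build-dependent
  std::cout << c10::toString(engine) << "\n";  // prints "FBGEMM"
}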
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h ADDED
@@ -0,0 +1,51 @@
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Storage.h>
4
+ #include <c10/util/UniqueVoidPtr.h>
5
+
6
+ #include <atomic>
7
+ #include <memory>
8
+
9
+ namespace c10 {
10
+
11
+ // A RefcountedDeleterContext object is used as the `ctx` argument for DataPtr
12
+ // to implement a shared DataPtr. Normally, a DataPtr is unique, but we use
13
+ // this custom context and the `refcounted_deleter` function below to make the
14
+ // DataPtr act like a non-unique DataPtr. This context object holds onto an
15
+ // inner context and deleter function which handle the actual deletion of the
16
+ // data when the refcount reaches 0.
17
+ //
18
+ // This shared DataPtr feature is only used when storages are shared between
19
+ // multiple Python interpreters in MultiPy. Before storages had PyObject
20
+ // preservation, interpreters could just share the same StorageImpl instance.
21
+ // But now a StorageImpl can only be associated with one interpreter in order
22
+ // to properly manage a zombie PyObject. So we share storages across Python
23
+ // interpreters by creating a different StorageImpl instance for each one, but
24
+ // they all point to the same data.
25
+ struct C10_API RefcountedDeleterContext {
26
+ RefcountedDeleterContext(void* other_ctx, c10::DeleterFnPtr other_deleter)
27
+ : other_ctx(other_ctx, other_deleter), refcount(1) {}
28
+
29
+ std::unique_ptr<void, c10::DeleterFnPtr> other_ctx;
30
+ std::atomic_int refcount;
31
+ };
32
+
33
+ // `refcounted_deleter` is used as the `ctx_deleter` for DataPtr to implement
34
+ // a shared DataPtr.
35
+ //
36
+ // Warning: This should only be called on a pointer to
37
+ // a RefcountedDeleterContext that was allocated on the heap with `new`,
38
+ // because when the refcount reaches 0, the context is deleted with `delete`
39
+ C10_API void refcounted_deleter(void* ctx_);
40
+
41
+ // If the storage's DataPtr does not use `refcounted_deleter`, replace it with
42
+ // a DataPtr that does, so it can be shared between multiple StorageImpls
43
+ C10_API void maybeApplyRefcountedDeleter(const c10::Storage& storage);
44
+
45
+ // Create a new StorageImpl that points to the same data. If the original
46
+ // StorageImpl's DataPtr does not use `refcounted_deleter`, it will be replaced
47
+ // with one that does
48
+ C10_API c10::Storage newStorageImplFromRefcountedDataPtr(
49
+ const c10::Storage& storage);
50
+
51
+ } // namespace c10
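A sketch of the sharing helper declared above; `storage` is assumed to be an existing c10::Storage owned by the caller:

#include <c10/core/RefcountedDeleter.h>

c10::Storage share_storage(const c10::Storage& storage) {
  // Per the comment above, this swaps in a refcounted deleter if needed and
  // returns a new StorageImpl that views the same underlying data.
  return c10::newStorageImplFromRefcountedDataPtr(storage);
}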
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h ADDED
@@ -0,0 +1,82 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/PyInterpreter.h>
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/util/python_stub.h>
6
+
7
+ namespace c10 {
8
+
9
+ // This is a safe owning holder for a PyObject, akin to pybind11's
10
+ // py::object, with two major differences:
11
+ //
12
+ // - It is in c10/core; i.e., you can use this type in contexts where
13
+ // you do not have a libpython dependency
14
+ //
15
+ // - It is multi-interpreter safe (ala torchdeploy); when you fetch
16
+ // the underlying PyObject* you are required to specify what the current
17
+ // interpreter context is and we will check that you match it.
18
+ //
19
+ // It is INVALID to store a reference to a Tensor object in this way;
20
+ // you should just use TensorImpl directly in that case!
21
+ struct C10_API SafePyObject {
22
+ // Steals a reference to data
23
+ SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
24
+ : data_(data), pyinterpreter_(pyinterpreter) {}
25
+ SafePyObject(SafePyObject&& other) noexcept
26
+ : data_(std::exchange(other.data_, nullptr)),
27
+ pyinterpreter_(other.pyinterpreter_) {}
28
+
29
+ // In principle this could be copyable if we add an incref to PyInterpreter
30
+ // but for now it's easier to just disallow it.
31
+ SafePyObject(SafePyObject const&) = delete;
32
+ SafePyObject& operator=(SafePyObject const&) = delete;
33
+
34
+ ~SafePyObject() {
35
+ if (data_ != nullptr) {
36
+ (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false);
37
+ }
38
+ }
39
+
40
+ c10::impl::PyInterpreter& pyinterpreter() const {
41
+ return *pyinterpreter_;
42
+ }
43
+ PyObject* ptr(const c10::impl::PyInterpreter*) const;
44
+
45
+ // stop tracking the current object, and return it
46
+ PyObject* release() {
47
+ auto rv = data_;
48
+ data_ = nullptr;
49
+ return rv;
50
+ }
51
+
52
+ private:
53
+ PyObject* data_;
54
+ c10::impl::PyInterpreter* pyinterpreter_;
55
+ };
56
+
57
+ // Like SafePyObject, but non-owning. Good for references to global PyObjects
58
+ // that will be leaked on interpreter exit. You get a copy constructor/assign
59
+ // this way.
60
+ struct C10_API SafePyHandle {
61
+ SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {}
62
+ SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
63
+ : data_(data), pyinterpreter_(pyinterpreter) {}
64
+
65
+ c10::impl::PyInterpreter& pyinterpreter() const {
66
+ return *pyinterpreter_;
67
+ }
68
+ PyObject* ptr(const c10::impl::PyInterpreter*) const;
69
+ void reset() {
70
+ data_ = nullptr;
71
+ pyinterpreter_ = nullptr;
72
+ }
73
+ operator bool() {
74
+ return data_;
75
+ }
76
+
77
+ private:
78
+ PyObject* data_;
79
+ c10::impl::PyInterpreter* pyinterpreter_;
80
+ };
81
+
82
+ } // namespace c10
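A sketch of holding a PyObject with the class above; `obj` (a new reference) and `interp` are assumed to be provided by code on the Python side of the boundary:

#include <c10/core/SafePyObject.h>

c10::SafePyObject hold(PyObject* obj, c10::impl::PyInterpreter* interp) {
  c10::SafePyObject holder(obj, interp);  // steals the reference to obj
  // Reading the raw pointer back requires naming the current interpreter.
  PyObject* raw = holder.ptr(interp);
  (void)raw;
  return holder;  // moved out; the decref happens when the holder dies
}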
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h ADDED
@@ -0,0 +1,672 @@
1
+ #pragma once
2
+
3
+ #include <c10/util/BFloat16.h>
4
+ #include <c10/util/Deprecated.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Float8_e4m3fn.h>
7
+ #include <c10/util/Float8_e4m3fnuz.h>
8
+ #include <c10/util/Float8_e5m2.h>
9
+ #include <c10/util/Float8_e5m2fnuz.h>
10
+ #include <c10/util/Half.h>
11
+ #include <c10/util/bits.h>
12
+ #include <c10/util/complex.h>
13
+ #include <c10/util/qint32.h>
14
+ #include <c10/util/qint8.h>
15
+ #include <c10/util/quint2x4.h>
16
+ #include <c10/util/quint4x2.h>
17
+ #include <c10/util/quint8.h>
18
+
19
+ #include <array>
20
+ #include <complex>
21
+ #include <cstdint>
22
+ #include <ostream>
23
+
24
+ namespace c10 {
25
+
26
+ // For the macros below:
27
+ // NB: If you want to macro some code for all non-QInt scalar types (i.e. types
28
+ // with complete information), you probably want one of the
29
+ // AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND
30
+ // macros below, which are designed to behave similarly to the Dispatch macros
31
+ // with the same name.
32
+
33
+ // NB: Order matters for this macro; it is relied upon in
34
+ // _promoteTypesLookup and the serialization format.
35
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \
36
+ _(uint8_t, Byte) /* 0 */ \
37
+ _(int8_t, Char) /* 1 */ \
38
+ _(int16_t, Short) /* 2 */ \
39
+ _(int, Int) /* 3 */ \
40
+ _(int64_t, Long) /* 4 */ \
41
+ _(at::Half, Half) /* 5 */ \
42
+ _(float, Float) /* 6 */ \
43
+ _(double, Double) /* 7 */ \
44
+ _(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
45
+ _(c10::complex<float>, ComplexFloat) /* 9 */ \
46
+ _(c10::complex<double>, ComplexDouble) /* 10 */ \
47
+ _(bool, Bool) /* 11 */ \
48
+ _(c10::qint8, QInt8) /* 12 */ \
49
+ _(c10::quint8, QUInt8) /* 13 */ \
50
+ _(c10::qint32, QInt32) /* 14 */ \
51
+ _(at::BFloat16, BFloat16) /* 15 */ \
52
+ _(c10::quint4x2, QUInt4x2) /* 16 */ \
53
+ _(c10::quint2x4, QUInt2x4) /* 17 */ \
54
+ _(c10::bits1x8, Bits1x8) /* 18 */ \
55
+ _(c10::bits2x4, Bits2x4) /* 19 */ \
56
+ _(c10::bits4x2, Bits4x2) /* 20 */ \
57
+ _(c10::bits8, Bits8) /* 21 */ \
58
+ _(c10::bits16, Bits16) /* 22 */ \
59
+ _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ \
60
+ _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ \
61
+ _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ \
62
+ _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */
63
+
64
+ // If you want to support ComplexHalf for real, add ComplexHalf
65
+ // into this macro (and change the name). But beware: convert()
66
+ // doesn't work for all the conversions you need...
67
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) \
68
+ _(uint8_t, Byte) \
69
+ _(int8_t, Char) \
70
+ _(int16_t, Short) \
71
+ _(int, Int) \
72
+ _(int64_t, Long) \
73
+ _(at::Half, Half) \
74
+ _(float, Float) \
75
+ _(double, Double) \
76
+ _(c10::complex<float>, ComplexFloat) \
77
+ _(c10::complex<double>, ComplexDouble) \
78
+ _(bool, Bool) \
79
+ _(at::BFloat16, BFloat16) \
80
+ _(at::Float8_e5m2, Float8_e5m2) \
81
+ _(at::Float8_e4m3fn, Float8_e4m3fn)
82
+
83
+ #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
84
+ _(uint8_t, Byte) \
85
+ _(int8_t, Char) \
86
+ _(int16_t, Short) \
87
+ _(int, Int) \
88
+ _(int64_t, Long) \
89
+ _(at::Half, Half) \
90
+ _(float, Float) \
91
+ _(double, Double) \
92
+ _(c10::complex<c10::Half>, ComplexHalf) \
93
+ _(c10::complex<float>, ComplexFloat) \
94
+ _(c10::complex<double>, ComplexDouble) \
95
+ _(bool, Bool) \
96
+ _(at::BFloat16, BFloat16) \
97
+ _(at::Float8_e5m2, Float8_e5m2) \
98
+ _(at::Float8_e4m3fn, Float8_e4m3fn) \
99
+ _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) \
100
+ _(at::Float8_e4m3fnuz, Float8_e4m3fnuz)
101
+
102
+ enum class ScalarType : int8_t {
103
+ #define DEFINE_ST_ENUM_VAL_(_1, n) n,
104
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ST_ENUM_VAL_)
105
+ #undef DEFINE_ST_ENUM_VAL_
106
+ Undefined,
107
+ NumOptions
108
+ };
109
+
110
+ constexpr uint16_t NumScalarTypes =
111
+ static_cast<uint16_t>(ScalarType::NumOptions);
112
+
113
+ namespace impl {
114
+
115
+ // These are used to map ScalarTypes to C++ types.
116
+
117
+ template <c10::ScalarType N>
118
+ struct ScalarTypeToCPPType;
119
+
120
+ #define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \
121
+ template <> \
122
+ struct ScalarTypeToCPPType<c10::ScalarType::scalar_type> { \
123
+ using type = cpp_type; \
124
+ \
125
+ /* This is a workaround for the CUDA bug which prevents */ \
126
+ /* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
127
+ /* ambiguous reference which can't to be resolved. For some reason it */ \
128
+ /* can't pick between at::detail and at::cuda::detail. */ \
129
+ /* For repro example, please see: */ \
130
+ /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
131
+ /* TODO: remove once the bug is fixed. */ \
132
+ static type t; \
133
+ };
134
+
135
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType)
136
+
137
+ #undef SPECIALIZE_ScalarTypeToCPPType
138
+
139
+ template <c10::ScalarType N>
140
+ using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType<N>::type;
141
+
142
+ } // namespace impl
143
+
144
+ template <typename T>
145
+ struct CppTypeToScalarType;
146
+
147
+ #define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \
148
+ template <> \
149
+ struct CppTypeToScalarType<cpp_type> \
150
+ : std:: \
151
+ integral_constant<c10::ScalarType, c10::ScalarType::scalar_type> { \
152
+ };
153
+
154
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType)
155
+
156
+ #undef SPECIALIZE_CppTypeToScalarType
157
+
158
+ #define AT_FORALL_INT_TYPES(_) \
159
+ _(uint8_t, Byte) \
160
+ _(int8_t, Char) \
161
+ _(int16_t, Short) \
162
+ _(int, Int) \
163
+ _(int64_t, Long)
164
+
165
+ #define AT_FORALL_SCALAR_TYPES(_) \
166
+ _(uint8_t, Byte) \
167
+ _(int8_t, Char) \
168
+ _(int16_t, Short) \
169
+ _(int, Int) \
170
+ _(int64_t, Long) \
171
+ _(float, Float) \
172
+ _(double, Double)
173
+
174
+ #define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \
175
+ _(uint8_t, Byte) \
176
+ _(int8_t, Char) \
177
+ _(int16_t, Short) \
178
+ _(int, Int) \
179
+ _(int64_t, Long) \
180
+ _(float, Float) \
181
+ _(double, Double) \
182
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
183
+ ::c10::ScalarType::SCALARTYPE>::t), \
184
+ SCALARTYPE)
185
+
186
+ #define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \
187
+ _(uint8_t, Byte) \
188
+ _(int8_t, Char) \
189
+ _(int16_t, Short) \
190
+ _(int, Int) \
191
+ _(int64_t, Long) \
192
+ _(float, Float) \
193
+ _(double, Double) \
194
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
195
+ ::c10::ScalarType::SCALARTYPE1>::t), \
196
+ SCALARTYPE1) \
197
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
198
+ ::c10::ScalarType::SCALARTYPE2>::t), \
199
+ SCALARTYPE2)
200
+
201
+ #define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \
202
+ _(uint8_t, Byte) \
203
+ _(int8_t, Char) \
204
+ _(int16_t, Short) \
205
+ _(int, Int) \
206
+ _(int64_t, Long) \
207
+ _(float, Float) \
208
+ _(double, Double) \
209
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
210
+ ::c10::ScalarType::SCALARTYPE1>::t), \
211
+ SCALARTYPE1) \
212
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
213
+ ::c10::ScalarType::SCALARTYPE2>::t), \
214
+ SCALARTYPE2) \
215
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
216
+ ::c10::ScalarType::SCALARTYPE3>::t), \
217
+ SCALARTYPE3)
218
+
219
+ #define AT_FORALL_SCALAR_TYPES_AND4( \
220
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, _) \
221
+ _(uint8_t, Byte) \
222
+ _(int8_t, Char) \
223
+ _(int16_t, Short) \
224
+ _(int, Int) \
225
+ _(int64_t, Long) \
226
+ _(float, Float) \
227
+ _(double, Double) \
228
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
229
+ ::c10::ScalarType::SCALARTYPE1>::t), \
230
+ SCALARTYPE1) \
231
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
232
+ ::c10::ScalarType::SCALARTYPE2>::t), \
233
+ SCALARTYPE2) \
234
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
235
+ ::c10::ScalarType::SCALARTYPE3>::t), \
236
+ SCALARTYPE3) \
237
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
238
+ ::c10::ScalarType::SCALARTYPE4>::t), \
239
+ SCALARTYPE4)
240
+
241
+ #define AT_FORALL_SCALAR_TYPES_AND5( \
242
+ SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, SCALARTYPE5, _) \
243
+ _(uint8_t, Byte) \
244
+ _(int8_t, Char) \
245
+ _(int16_t, Short) \
246
+ _(int, Int) \
247
+ _(int64_t, Long) \
248
+ _(float, Float) \
249
+ _(double, Double) \
250
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
251
+ ::c10::ScalarType::SCALARTYPE1>::t), \
252
+ SCALARTYPE1) \
253
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
254
+ ::c10::ScalarType::SCALARTYPE2>::t), \
255
+ SCALARTYPE2) \
256
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
257
+ ::c10::ScalarType::SCALARTYPE3>::t), \
258
+ SCALARTYPE3) \
259
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
260
+ ::c10::ScalarType::SCALARTYPE4>::t), \
261
+ SCALARTYPE4) \
262
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
263
+ ::c10::ScalarType::SCALARTYPE5>::t), \
264
+ SCALARTYPE5)
265
+
266
+ #define AT_FORALL_SCALAR_TYPES_AND6( \
267
+ SCALARTYPE1, \
268
+ SCALARTYPE2, \
269
+ SCALARTYPE3, \
270
+ SCALARTYPE4, \
271
+ SCALARTYPE5, \
272
+ SCALARTYPE6, \
273
+ _) \
274
+ _(uint8_t, Byte) \
275
+ _(int8_t, Char) \
276
+ _(int16_t, Short) \
277
+ _(int, Int) \
278
+ _(int64_t, Long) \
279
+ _(float, Float) \
280
+ _(double, Double) \
281
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
282
+ ::c10::ScalarType::SCALARTYPE1>::t), \
283
+ SCALARTYPE1) \
284
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
285
+ ::c10::ScalarType::SCALARTYPE2>::t), \
286
+ SCALARTYPE2) \
287
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
288
+ ::c10::ScalarType::SCALARTYPE3>::t), \
289
+ SCALARTYPE3) \
290
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
291
+ ::c10::ScalarType::SCALARTYPE4>::t), \
292
+ SCALARTYPE4) \
293
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
294
+ ::c10::ScalarType::SCALARTYPE5>::t), \
295
+ SCALARTYPE5) \
296
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
297
+ ::c10::ScalarType::SCALARTYPE6>::t), \
298
+ SCALARTYPE6)
299
+
300
+ #define AT_FORALL_SCALAR_TYPES_AND7( \
301
+ SCALARTYPE1, \
302
+ SCALARTYPE2, \
303
+ SCALARTYPE3, \
304
+ SCALARTYPE4, \
305
+ SCALARTYPE5, \
306
+ SCALARTYPE6, \
307
+ SCALARTYPE7, \
308
+ _) \
309
+ _(uint8_t, Byte) \
310
+ _(int8_t, Char) \
311
+ _(int16_t, Short) \
312
+ _(int, Int) \
313
+ _(int64_t, Long) \
314
+ _(float, Float) \
315
+ _(double, Double) \
316
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
317
+ ::c10::ScalarType::SCALARTYPE1>::t), \
318
+ SCALARTYPE1) \
319
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
320
+ ::c10::ScalarType::SCALARTYPE2>::t), \
321
+ SCALARTYPE2) \
322
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
323
+ ::c10::ScalarType::SCALARTYPE3>::t), \
324
+ SCALARTYPE3) \
325
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
326
+ ::c10::ScalarType::SCALARTYPE4>::t), \
327
+ SCALARTYPE4) \
328
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
329
+ ::c10::ScalarType::SCALARTYPE5>::t), \
330
+ SCALARTYPE5) \
331
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
332
+ ::c10::ScalarType::SCALARTYPE6>::t), \
333
+ SCALARTYPE6) \
334
+ _(decltype(::c10::impl::ScalarTypeToCPPType< \
335
+ ::c10::ScalarType::SCALARTYPE7>::t), \
336
+ SCALARTYPE7)
337
+
338
+ #define AT_FORALL_QINT_TYPES(_) \
339
+ _(c10::qint8, QInt8) \
340
+ _(c10::quint8, QUInt8) \
341
+ _(c10::qint32, QInt32) \
342
+ _(c10::quint4x2, QUInt4x2) \
343
+ _(c10::quint2x4, QUInt2x4)
344
+
345
+ #define AT_FORALL_COMPLEX_TYPES(_) \
346
+ _(c10::complex<float>, ComplexFloat) \
347
+ _(c10::complex<double>, ComplexDouble)
348
+
349
+ #define DEFINE_CONSTANT(_, name) \
350
+ constexpr ScalarType k##name = ScalarType::name;
351
+
352
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT)
353
+ #undef DEFINE_CONSTANT
354
+
355
+ static inline const char* toString(ScalarType t) {
356
+ #define DEFINE_CASE(_, name) \
357
+ case ScalarType::name: \
358
+ return #name;
359
+
360
+ switch (t) {
361
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE)
362
+ default:
363
+ return "UNKNOWN_SCALAR";
364
+ }
365
+ #undef DEFINE_CASE
366
+ }
367
+
368
+ static inline size_t elementSize(ScalarType t) {
369
+ #define CASE_ELEMENTSIZE_CASE(ctype, name) \
370
+ case ScalarType::name: \
371
+ return sizeof(ctype);
372
+
373
+ switch (t) {
374
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE)
375
+ default:
376
+ TORCH_CHECK(false, "Unknown ScalarType");
377
+ }
378
+ #undef CASE_ELEMENTSIZE_CASE
379
+ }
380
+
381
+ static inline bool isIntegralType(ScalarType t, bool includeBool) {
382
+ bool isIntegral =
383
+ (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int ||
384
+ t == ScalarType::Long || t == ScalarType::Short);
385
+
386
+ return isIntegral || (includeBool && t == ScalarType::Bool);
387
+ }
388
+
389
+ C10_DEPRECATED_MESSAGE(
390
+ "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
391
+ static inline bool isIntegralType(ScalarType t) {
392
+ return isIntegralType(t, /*includeBool=*/false);
393
+ }
394
+
395
+ static inline bool isFloat8Type(ScalarType t) {
396
+ return t == ScalarType::Float8_e5m2 || t == ScalarType::Float8_e5m2fnuz ||
397
+ t == ScalarType::Float8_e4m3fn || t == ScalarType::Float8_e4m3fnuz;
398
+ }
399
+
400
+ static inline bool isReducedFloatingType(ScalarType t) {
401
+ return t == ScalarType::Half || t == ScalarType::BFloat16 || isFloat8Type(t);
402
+ }
403
+
404
+ static inline bool isFloatingType(ScalarType t) {
405
+ return t == ScalarType::Double || t == ScalarType::Float ||
406
+ isReducedFloatingType(t);
407
+ }
408
+
409
+ static inline bool isComplexType(ScalarType t) {
410
+ return (
411
+ t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat ||
412
+ t == ScalarType::ComplexDouble);
413
+ }
414
+
415
+ static inline bool isQIntType(ScalarType t) {
416
+ // Don't forget to extend this when adding new QInt types
417
+ return t == ScalarType::QInt8 || t == ScalarType::QUInt8 ||
418
+ t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 ||
419
+ t == ScalarType::QUInt2x4;
420
+ }
421
+
422
+ static inline bool isBitsType(ScalarType t) {
423
+ return t == ScalarType::Bits1x8 || t == ScalarType::Bits2x4 ||
424
+ t == ScalarType::Bits4x2 || t == ScalarType::Bits8 ||
425
+ t == ScalarType::Bits16;
426
+ }
427
+
428
+ static inline ScalarType toQIntType(ScalarType t) {
429
+ switch (t) {
430
+ case ScalarType::Byte:
431
+ return ScalarType::QUInt8;
432
+ case ScalarType::Char:
433
+ return ScalarType::QInt8;
434
+ case ScalarType::Int:
435
+ return ScalarType::QInt32;
436
+ default:
437
+ return t;
438
+ }
439
+ }
440
+
441
+ static inline ScalarType toUnderlying(ScalarType t) {
442
+ switch (t) {
443
+ case ScalarType::QUInt8:
444
+ return ScalarType::Byte;
445
+ case ScalarType::QInt8:
446
+ return ScalarType::Char;
447
+ case ScalarType::QInt32:
448
+ return ScalarType::Int;
449
+ case ScalarType::QUInt4x2:
450
+ return ScalarType::Byte;
451
+ case ScalarType::QUInt2x4:
452
+ return ScalarType::Byte;
453
+ default:
454
+ return t;
455
+ }
456
+ }
457
+
458
+ static inline bool isSignedType(ScalarType t) {
459
+ TORCH_CHECK(!isQIntType(t), "isSignedType not supported for quantized types");
460
+ #define CASE_SIGNED(ctype, name) \
461
+ case ScalarType::name: \
462
+ return std::numeric_limits<ctype>::is_signed;
463
+
464
+ switch (t) {
465
+ case ScalarType::Bits1x8:
466
+ case ScalarType::Bits2x4:
467
+ case ScalarType::Bits4x2:
468
+ case ScalarType::Bits8:
469
+ case ScalarType::Bits16:
470
+ TORCH_CHECK(false, "Bits types are undefined");
471
+ case ScalarType::ComplexHalf:
472
+ case ScalarType::ComplexFloat:
473
+ case ScalarType::ComplexDouble:
474
+ return true;
475
+ AT_FORALL_SCALAR_TYPES_AND5(
476
+ Half, Bool, BFloat16, Float8_e5m2, Float8_e4m3fn, CASE_SIGNED)
477
+ default:
478
+ TORCH_CHECK(false, "Unknown ScalarType");
479
+ }
480
+ #undef CASE_SIGNED
481
+ }
482
+
483
+ static inline bool isUnderlying(ScalarType type, ScalarType qtype) {
484
+ return type == toUnderlying(qtype);
485
+ }
486
+
487
+ static inline ScalarType toRealValueType(ScalarType t) {
488
+ switch (t) {
489
+ case ScalarType::ComplexHalf:
490
+ return ScalarType::Half;
491
+ case ScalarType::ComplexFloat:
492
+ return ScalarType::Float;
493
+ case ScalarType::ComplexDouble:
494
+ return ScalarType::Double;
495
+ default:
496
+ return t;
497
+ }
498
+ }
499
+
500
+ static inline ScalarType toComplexType(ScalarType t) {
501
+ switch (t) {
502
+ case ScalarType::BFloat16:
503
+ // BFloat16 has range equivalent to Float,
504
+ // so we map it to ComplexFloat.
505
+ return ScalarType::ComplexFloat;
506
+ case ScalarType::Half:
507
+ return ScalarType::ComplexHalf;
508
+ case ScalarType::Float:
509
+ return ScalarType::ComplexFloat;
510
+ case ScalarType::Double:
511
+ return ScalarType::ComplexDouble;
512
+ case ScalarType::ComplexHalf:
513
+ return ScalarType::ComplexHalf;
514
+ case ScalarType::ComplexFloat:
515
+ return ScalarType::ComplexFloat;
516
+ case ScalarType::ComplexDouble:
517
+ return ScalarType::ComplexDouble;
518
+ default:
519
+ TORCH_CHECK(false, "Unknown Complex ScalarType for ", t);
520
+ }
521
+ }
522
+
523
+ // see tensor_attributes.rst for detailed explanation and examples
524
+ // of casting rules.
525
+ static inline bool canCast(const ScalarType from, const ScalarType to) {
526
+ // We disallow complex -> non complex, e.g., float_tensor *= complex is
527
+ // disallowed.
528
+ if (isComplexType(from) && !isComplexType(to)) {
529
+ return false;
530
+ }
531
+ // We disallow float -> integral, e.g., int_tensor *= float is disallowed.
532
+ if (isFloatingType(from) && isIntegralType(to, false)) {
533
+ return false;
534
+ }
535
+
536
+ // Treat bool as a distinct "category," to be consistent with type promotion
537
+ // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same
538
+ // category as `bool_tensor`, we would not promote. Differing categories
539
+ // implies `bool_tensor += 5` is disallowed.
540
+ //
541
+ // NB: numpy distinguishes "unsigned" as a category to get the desired
542
+ // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because:
543
+ // * We don't want the performance hit of checking the runtime sign of
544
+ // Scalars.
545
+ // * `uint8_tensor + 5 -> int64_tensor` would be undesirable.
546
+ if (from != ScalarType::Bool && to == ScalarType::Bool) {
547
+ return false;
548
+ }
549
+ return true;
550
+ }
551
+
552
+ static inline ScalarType promoteTypes(ScalarType a, ScalarType b) {
553
+ // This is generated according to NumPy's promote_types
554
+ constexpr auto u1 = ScalarType::Byte;
555
+ constexpr auto i1 = ScalarType::Char;
556
+ constexpr auto i2 = ScalarType::Short;
557
+ constexpr auto i4 = ScalarType::Int;
558
+ constexpr auto i8 = ScalarType::Long;
559
+ constexpr auto f2 = ScalarType::Half;
560
+ constexpr auto f4 = ScalarType::Float;
561
+ constexpr auto f8 = ScalarType::Double;
562
+ constexpr auto c2 = ScalarType::ComplexHalf;
563
+ constexpr auto c4 = ScalarType::ComplexFloat;
564
+ constexpr auto c8 = ScalarType::ComplexDouble;
565
+ constexpr auto b1 = ScalarType::Bool;
566
+ constexpr auto bf = ScalarType::BFloat16;
567
+ constexpr auto ud = ScalarType::Undefined;
568
+ if (a == ud || b == ud) {
569
+ return ScalarType::Undefined;
570
+ }
571
+
572
+ // If the two types are equal, return that type
573
+ if (a == b) {
574
+ return a;
575
+ }
576
+
577
+ // Handle identically equal types
578
+ if (isQIntType(a) || isQIntType(b)) {
579
+ TORCH_CHECK(
580
+ false,
581
+ "promoteTypes with quantized numbers is not handled yet; figure out what the correct rules should be, offending types: ",
582
+ toString(a),
583
+ " ",
584
+ toString(b));
585
+ }
586
+
587
+ if (isBitsType(a) || isBitsType(b)) {
588
+ return ScalarType::Undefined;
589
+ }
590
+
591
+ if (isFloat8Type(a) || isFloat8Type(b)) {
592
+ TORCH_CHECK(
593
+ false,
594
+ "Promotion for Float8 Types is not supported, attempted to promote ",
595
+ toString(a),
596
+ " and ",
597
+ toString(b));
598
+ }
599
+
600
+ // Bits, Quantized, and Float8 together account for 14 dtypes that are
601
+ // already handled above and are not included in the promotion table below.
602
+ static constexpr int num_bits_types = static_cast<int>(ScalarType::Bits16) -
603
+ static_cast<int>(ScalarType::Bits1x8) + 1;
604
+
605
+ static constexpr int num_float8_types =
606
+ static_cast<int>(ScalarType::Float8_e4m3fnuz) -
607
+ static_cast<int>(ScalarType::Float8_e5m2) + 1;
608
+
609
+ static constexpr int num_qint_types = static_cast<int>(ScalarType::QInt32) -
610
+ static_cast<int>(ScalarType::QInt8) + 1;
611
+
612
+ static constexpr int num_quint_types =
613
+ static_cast<int>(ScalarType::QUInt2x4) -
614
+ static_cast<int>(ScalarType::QUInt4x2) + 1;
615
+
616
+ static constexpr int num_quantized_types = num_qint_types + num_quint_types;
617
+
618
+ static constexpr int num_missing_types =
619
+ num_bits_types + num_float8_types + num_quantized_types;
620
+
621
+ // BFloat16 is at position 15 in the ScalarType enum. There are three types
622
+ // below bf16 not included in the table (QInt8, QUInt8, QInt32). Every other
623
+ // type above bf16, i.e. {Bits, Quantized, Float8}, is not included in the
624
+ // table.
625
+
626
+ // If either of the types is bf16, we need to shift the type down by the one
627
+ // missing section in the table that is less than bf16, i.e. {QInt8, QUInt8,
628
+ // QInt32}
629
+ a = a == bf ? static_cast<ScalarType>(static_cast<int>(a) - num_qint_types)
630
+ : a;
631
+ b = b == bf ? static_cast<ScalarType>(static_cast<int>(b) - num_qint_types)
632
+ : b;
633
+
634
+ // We decrease the promotion table by the number of missing types -> 14
635
+ // and then subtract 1 more from the table since we don't store ud to ud
636
+ // mapping.
637
+ static constexpr int NUM_PROMOTE_TYPES =
638
+ static_cast<int>(ScalarType::NumOptions) - num_missing_types - 1;
639
+
640
+ // this matrix has to be consistent with
641
+ // AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS; undefined is used where we
642
+ // are not sure about the correct value for type promotion.
643
+ // clang-format off
644
+ static constexpr std::
645
+ array<std::array<ScalarType, NUM_PROMOTE_TYPES>, NUM_PROMOTE_TYPES>
646
+ _promoteTypesLookup = {{
647
+ /* u1 i1 i2 i4 i8 f2 f4 f8 c2 c4 c8 b1 bf*/
648
+ /* u1 */ {u1, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, u1, bf},
649
+ /* i1 */ {i2, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, i1, bf},
650
+ /* i2 */ {i2, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, i2, bf},
651
+ /* i4 */ {i4, i4, i4, i4, i8, f2, f4, f8, c2, c4, c8, i4, bf},
652
+ /* i8 */ {i8, i8, i8, i8, i8, f2, f4, f8, c2, c4, c8, i8, bf},
653
+ /* f2 */ {f2, f2, f2, f2, f2, f2, f4, f8, c2, c4, c8, f2, f4},
654
+ /* f4 */ {f4, f4, f4, f4, f4, f4, f4, f8, c4, c4, c8, f4, f4},
655
+ /* f8 */ {f8, f8, f8, f8, f8, f8, f8, f8, c8, c8, c8, f8, f8},
656
+ /* c2 */ {c2, c2, c2, c2, c2, c2, c4, c8, c2, c4, c8, c2, c4},
657
+ /* c4 */ {c4, c4, c4, c4, c4, c4, c4, c8, c4, c4, c8, c4, c4},
658
+ /* c8 */ {c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8},
659
+ /* b1 */ {u1, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, b1, bf},
660
+ /* bf */ {bf, bf, bf, bf, bf, f4, f4, f8, c4, c4, c8, bf, bf},
661
+ }};
662
+ // clang-format on
663
+ return _promoteTypesLookup[static_cast<int>(a)][static_cast<int>(b)];
664
+ }
665
+
666
+ inline std::ostream& operator<<(
667
+ std::ostream& stream,
668
+ at::ScalarType scalar_type) {
669
+ return stream << toString(scalar_type);
670
+ }
671
+
672
+ } // namespace c10
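A short sketch exercising the inline helpers defined above; the expected results follow from the promotion table and element sizes:

#include <c10/core/ScalarType.h>
#include <iostream>

void scalar_type_example() {
  auto promoted = c10::promoteTypes(c10::kInt, c10::kFloat);
  std::cout << c10::toString(promoted) << "\n";                // "Float"
  std::cout << c10::elementSize(c10::kComplexDouble) << "\n";  // 16 (bytes)
  bool ok = c10::canCast(c10::kFloat, c10::kLong);             // false: float -> integral
  std::cout << std::boolalpha << ok << "\n";
}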
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h ADDED
@@ -0,0 +1,165 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/impl/InlineStreamGuard.h>
4
+
5
+ namespace c10 {
6
+
7
+ /**
8
+ * A StreamGuard is an RAII class that changes the current device
9
+ * to the device corresponding to some stream, and changes the
10
+ * default stream on that device to be this stream.
11
+ *
12
+ * Use of StreamGuard is HIGHLY discouraged in operator definitions. In
13
+ * a single operator, you probably don't know enough about the global
14
+ * state of the world to profitably decide how to set streams. Let
15
+ * the caller handle this appropriately, and just use the current stream
16
+ * in your operator code.
17
+ *
18
+ * This StreamGuard does NOT have an uninitialized state; it is guaranteed
19
+ * to reset the stream and device on exit. If you are in a situation
20
+ * where you *might* want to setup a stream guard, see OptionalStreamGuard.
21
+ */
22
+ struct StreamGuard {
23
+ /// No default constructor, see Note [Omitted default constructor from RAII]
24
+ explicit StreamGuard() = delete;
25
+
26
+ /// Set the current device to the device associated with the passed stream,
27
+ /// and set the current stream on that device to the passed stream.
28
+ explicit StreamGuard(Stream stream) : guard_(stream) {}
29
+
30
+ /// Copy is disallowed
31
+ StreamGuard(const StreamGuard&) = delete;
32
+ StreamGuard& operator=(const StreamGuard&) = delete;
33
+
34
+ /// Move is disallowed, as StreamGuard does not have an uninitialized state,
35
+ /// which is required for moves on types with nontrivial destructors.
36
+ StreamGuard(StreamGuard&& other) = delete;
37
+ StreamGuard& operator=(StreamGuard&& other) = delete;
38
+
39
+ /// Resets the currently set stream to the original stream and
40
+ /// the currently set device to the original device. Then,
41
+ /// set the current device to the device associated with the passed stream,
42
+ /// and set the current stream on that device to the passed stream.
43
+ ///
44
+ /// NOTE: this implementation may skip some stream/device setting if
45
+ /// it can prove that it is unnecessary.
46
+ ///
47
+ /// WARNING: reset_stream does NOT preserve previously set streams on
48
+ /// different devices. If you need to set streams on multiple devices,
49
+ /// use MultiStreamGuard instead.
50
+ void reset_stream(Stream stream) {
51
+ guard_.reset_stream(stream);
52
+ }
53
+
54
+ /// Returns the stream that was set at the time the guard was constructed.
55
+ Stream original_stream() const {
56
+ return guard_.original_stream();
57
+ }
58
+
59
+ /// Returns the most recent stream that was set using this device guard,
60
+ /// either from construction, or via set_stream.
61
+ Stream current_stream() const {
62
+ return guard_.current_stream();
63
+ }
64
+
65
+ /// Returns the most recent device that was set using this device guard,
66
+ /// either from construction, or via set_device/reset_device/set_index.
67
+ Device current_device() const {
68
+ return guard_.current_device();
69
+ }
70
+
71
+ /// Returns the device that was set at the most recent reset_stream(),
72
+ /// or otherwise the device at construction time.
73
+ Device original_device() const {
74
+ return guard_.original_device();
75
+ }
76
+
77
+ private:
78
+ c10::impl::InlineStreamGuard<impl::VirtualGuardImpl> guard_;
79
+ };
80
+
81
+ /**
82
+ * An OptionalStreamGuard is an RAII class that sets a device to some value on
83
+ * initialization, and resets the device to its original value on destruction.
84
+ * See OptionalDeviceGuard for more guidance on how to use this class.
85
+ */
86
+ struct OptionalStreamGuard {
87
+ /// Create an uninitialized guard.
88
+ explicit OptionalStreamGuard() = default;
89
+
90
+ /// Set the current device to the device associated with the passed stream,
91
+ /// and set the current stream on that device to the passed stream.
92
+ explicit OptionalStreamGuard(Stream stream) : guard_(stream) {}
93
+
94
+ /// Set the current device to the device associated with the passed stream,
95
+ /// and set the current stream on that device to the passed stream,
96
+ /// if the passed stream is not nullopt.
97
+ explicit OptionalStreamGuard(optional<Stream> stream_opt)
98
+ : guard_(stream_opt) {}
99
+
100
+ /// Copy is disallowed
101
+ OptionalStreamGuard(const OptionalStreamGuard&) = delete;
102
+ OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete;
103
+
104
+ // See Note [Move construction for RAII guards is tricky]
105
+ OptionalStreamGuard(OptionalStreamGuard&& other) = delete;
106
+
107
+ // See Note [Move assignment for RAII guards is tricky]
108
+ OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete;
109
+
110
+ /// Resets the currently set stream to the original stream and
111
+ /// the currently set device to the original device. Then,
112
+ /// set the current device to the device associated with the passed stream,
113
+ /// and set the current stream on that device to the passed stream.
114
+ /// Initializes the guard if it was not previously initialized.
115
+ void reset_stream(Stream stream) {
116
+ guard_.reset_stream(stream);
117
+ }
118
+
119
+ /// Returns the stream that was set at the time the guard was most recently
120
+ /// initialized, or nullopt if the guard is uninitialized.
121
+ optional<Stream> original_stream() const {
122
+ return guard_.original_stream();
123
+ }
124
+
125
+ /// Returns the most recent stream that was set using this stream guard,
126
+ /// either from construction, or via reset_stream, if the guard is
127
+ /// initialized, or nullopt if the guard is uninitialized.
128
+ optional<Stream> current_stream() const {
129
+ return guard_.current_stream();
130
+ }
131
+
132
+ /// Restore the original device and stream, resetting this guard to
133
+ /// uninitialized state.
134
+ void reset() {
135
+ guard_.reset();
136
+ }
137
+
138
+ private:
139
+ c10::impl::InlineOptionalStreamGuard<impl::VirtualGuardImpl> guard_{};
140
+ };
141
+
142
+ /**
143
+ * A MultiStreamGuard is an RAII class that sets the current streams of a set of
144
+ * devices all at once, and resets them to their original values on destruction.
145
+ */
146
+ struct MultiStreamGuard {
147
+ /// Set the current streams to the passed streams on each of their respective
148
+ /// devices.
149
+ explicit MultiStreamGuard(ArrayRef<Stream> streams) : guard_(streams) {}
150
+
151
+ /// Copy is disallowed
152
+ MultiStreamGuard(const MultiStreamGuard&) = delete;
153
+ MultiStreamGuard& operator=(const MultiStreamGuard&) = delete;
154
+
155
+ // See Note [Move construction for RAII guards is tricky]
156
+ MultiStreamGuard(MultiStreamGuard&& other) = delete;
157
+
158
+ // See Note [Move assignment for RAII guards is tricky]
159
+ MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete;
160
+
161
+ private:
162
+ c10::impl::InlineMultiStreamGuard<impl::VirtualGuardImpl> guard_;
163
+ };
164
+
165
+ } // namespace c10
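A minimal usage sketch of the guards declared above. Obtaining a c10::Stream is backend-specific and outside this header, so the stream parameter here is assumed to come from elsewhere.

#include <c10/core/StreamGuard.h>

// Illustrative only: run work with `work_stream` as the current stream.
// The guard switches device + stream on entry and restores both on exit.
void run_on_stream(c10::Stream work_stream) {
  c10::StreamGuard guard(work_stream);
  // ... enqueue work here; it sees work_stream as the current stream ...
}  // original device and stream are restored when `guard` is destroyed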
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h ADDED
@@ -0,0 +1,88 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/SymNodeImpl.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/intrusive_ptr.h>
7
+
8
+ namespace c10 {
9
+
10
+ class C10_API SymBool {
11
+ public:
12
+ /*implicit*/ SymBool(bool b) : data_(b){};
13
+ SymBool(SymNode ptr) : data_(false), ptr_(std::move(ptr)) {
14
+ TORCH_CHECK(ptr_->is_bool());
15
+ };
16
+ SymBool() : data_(false) {}
17
+
18
+ SymNodeImpl* toSymNodeImplUnowned() const {
19
+ return ptr_.get();
20
+ }
21
+
22
+ SymNodeImpl* release() && {
23
+ return std::move(ptr_).release();
24
+ }
25
+
26
+ // Only valid if is_heap_allocated()
27
+ SymNode toSymNodeImpl() const;
28
+
29
+ // Guaranteed to return a SymNode, wrapping using base if necessary
30
+ SymNode wrap_node(const SymNode& base) const;
31
+
32
+ bool expect_bool() const {
33
+ c10::optional<bool> c = maybe_as_bool();
34
+ TORCH_CHECK(c.has_value());
35
+ return *c;
36
+ }
37
+
38
+ SymBool sym_and(const SymBool&) const;
39
+ SymBool sym_or(const SymBool&) const;
40
+ SymBool sym_not() const;
41
+
42
+ SymBool operator&(const SymBool& other) const {
43
+ return sym_and(other);
44
+ }
45
+ SymBool operator|(const SymBool& other) const {
46
+ return sym_or(other);
47
+ }
48
+ SymBool operator~() const {
49
+ return sym_not();
50
+ }
51
+
52
+ // Insert a guard for the bool to be its concrete value, and then return
53
+ // that value. Note that C++ comparison operations default to returning
54
+ // bool, so it's not so common to have to call this
55
+ bool guard_bool(const char* file, int64_t line) const;
56
+ bool expect_true(const char* file, int64_t line) const;
57
+
58
+ bool has_hint() const;
59
+
60
+ bool as_bool_unchecked() const {
61
+ return data_;
62
+ }
63
+
64
+ c10::optional<bool> maybe_as_bool() const {
65
+ if (!is_heap_allocated()) {
66
+ return c10::make_optional(data_);
67
+ }
68
+ return toSymNodeImplUnowned()->constant_bool();
69
+ }
70
+
71
+ bool is_heap_allocated() const {
72
+ return ptr_;
73
+ }
74
+
75
+ private:
76
+ // TODO: optimize to union
77
+ bool data_;
78
+ SymNode ptr_;
79
+ };
80
+
81
+ C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s);
82
+
83
+ #define TORCH_SYM_CHECK(cond, ...) \
84
+ TORCH_CHECK((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
85
+ #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) \
86
+ TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__)
87
+
88
+ } // namespace c10
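A small sketch of the value path of SymBool for plain (non-symbolic) booleans; guard_bool takes a source location because symbolic values would record a guard there. The helper function is hypothetical.

#include <c10/core/SymBool.h>

// Illustrative only: combine two SymBools and extract a concrete bool.
bool both_true(const c10::SymBool& a, const c10::SymBool& b) {
  c10::SymBool combined = a & b;  // forwards to sym_and
  // For plain bools this simply returns the stored value; for symbolic
  // bools it would insert a guard at this file/line.
  return combined.guard_bool(__FILE__, __LINE__);
}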
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h ADDED
@@ -0,0 +1,207 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+ #include <c10/util/Optional.h>
7
+ #include <c10/util/intrusive_ptr.h>
8
+
9
+ namespace c10 {
10
+
11
+ class SymNodeImpl;
12
+ using SymNode = c10::intrusive_ptr<SymNodeImpl>;
13
+
14
+ // When you add a method, you also need to edit
15
+ // torch/csrc/jit/python/init.cpp
16
+ // torch/csrc/utils/python_symnode.h
17
+ // c10/core/ConstantSymNodeImpl.h
18
+ class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
19
+ public:
20
+ ~SymNodeImpl() override = default;
21
+
22
+ template <typename T>
23
+ c10::intrusive_ptr<T> dyn_cast() const {
24
+ return c10::intrusive_ptr<T>::reclaim_copy(dynamic_cast<T*>(this));
25
+ }
26
+
27
+ // these could be pure virtual when we implement LTC versions
28
+ virtual bool is_int() {
29
+ TORCH_CHECK(false, "NYI");
30
+ };
31
+ virtual bool is_bool() {
32
+ TORCH_CHECK(false, "NYI");
33
+ };
34
+ virtual bool is_float() {
35
+ TORCH_CHECK(false, "NYI");
36
+ };
37
+ virtual SymNode add(const SymNode& other) {
38
+ TORCH_CHECK(false, "NYI");
39
+ };
40
+ virtual SymNode sub(const SymNode& other) {
41
+ TORCH_CHECK(false, "NYI");
42
+ };
43
+ virtual SymNode mul(const SymNode& other) {
44
+ TORCH_CHECK(false, "NYI");
45
+ };
46
+ virtual SymNode truediv(const SymNode& other) {
47
+ TORCH_CHECK(false, "NYI");
48
+ };
49
+ virtual SymNode pow(const SymNode& other) {
50
+ TORCH_CHECK(false, "NYI");
51
+ };
52
+ virtual SymNode floordiv(const SymNode& other) {
53
+ TORCH_CHECK(false, "NYI");
54
+ };
55
+ virtual SymNode mod(const SymNode& other) {
56
+ TORCH_CHECK(false, "NYI");
57
+ };
58
+ virtual SymNode eq(const SymNode& other) {
59
+ TORCH_CHECK(false, "NYI");
60
+ };
61
+ virtual SymNode ne(const SymNode& other) {
62
+ TORCH_CHECK(false, "NYI");
63
+ };
64
+ virtual SymNode gt(const SymNode& other) {
65
+ TORCH_CHECK(false, "NYI");
66
+ };
67
+ virtual SymNode lt(const SymNode& other) {
68
+ TORCH_CHECK(false, "NYI");
69
+ };
70
+ virtual SymNode le(const SymNode& other) {
71
+ TORCH_CHECK(false, "NYI");
72
+ };
73
+ virtual SymNode ge(const SymNode& other) {
74
+ TORCH_CHECK(false, "NYI");
75
+ };
76
+ virtual SymNode ceil() {
77
+ TORCH_CHECK(false, "NYI");
78
+ };
79
+ virtual SymNode floor() {
80
+ TORCH_CHECK(false, "NYI");
81
+ };
82
+ virtual SymNode neg() {
83
+ TORCH_CHECK(false, "NYI");
84
+ };
85
+ virtual SymNode sym_min(const SymNode& other) {
86
+ TORCH_CHECK(false, "NYI");
87
+ };
88
+ virtual SymNode sym_max(const SymNode& other) {
89
+ TORCH_CHECK(false, "NYI");
90
+ };
91
+ virtual SymNode sym_or(const SymNode& other) {
92
+ TORCH_CHECK(false, "NYI");
93
+ };
94
+ virtual SymNode sym_and(const SymNode& other) {
95
+ TORCH_CHECK(false, "NYI");
96
+ };
97
+ virtual SymNode sym_not() {
98
+ TORCH_CHECK(false, "NYI");
99
+ };
100
+ virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) {
101
+ TORCH_CHECK(false, "NYI");
102
+ };
103
+ // NB: self is ignored here, only the arguments are used
104
+ virtual SymNode is_contiguous(
105
+ ArrayRef<SymNode> sizes,
106
+ ArrayRef<SymNode> strides) {
107
+ TORCH_CHECK(false, "NYI");
108
+ };
109
+ virtual SymNode is_channels_last_contiguous_2d(
110
+ ArrayRef<SymNode> sizes,
111
+ ArrayRef<SymNode> strides) {
112
+ TORCH_CHECK(false, "NYI");
113
+ };
114
+ virtual SymNode is_channels_last_contiguous_3d(
115
+ ArrayRef<SymNode> sizes,
116
+ ArrayRef<SymNode> strides) {
117
+ TORCH_CHECK(false, "NYI");
118
+ };
119
+ virtual SymNode is_channels_last_strides_2d(
120
+ ArrayRef<SymNode> sizes,
121
+ ArrayRef<SymNode> strides) {
122
+ TORCH_CHECK(false, "NYI");
123
+ };
124
+ virtual SymNode is_channels_last_strides_3d(
125
+ ArrayRef<SymNode> sizes,
126
+ ArrayRef<SymNode> strides) {
127
+ TORCH_CHECK(false, "NYI");
128
+ };
129
+ virtual SymNode is_non_overlapping_and_dense(
130
+ ArrayRef<SymNode> sizes,
131
+ ArrayRef<SymNode> strides) {
132
+ TORCH_CHECK(false, "NYI");
133
+ };
134
+ virtual SymNode clone() {
135
+ TORCH_CHECK(false, "NYI");
136
+ };
137
+ virtual SymNode sym_float() {
138
+ TORCH_CHECK(false, "NYI");
139
+ }
140
+ virtual SymNode wrap_int(int64_t num) {
141
+ TORCH_CHECK(false, "NYI");
142
+ };
143
+ virtual SymNode wrap_float(double num) {
144
+ TORCH_CHECK(false, "NYI");
145
+ };
146
+ virtual SymNode wrap_bool(bool num) {
147
+ TORCH_CHECK(false, "NYI");
148
+ };
149
+ virtual int64_t guard_int(const char* file, int64_t line) {
150
+ TORCH_CHECK(false, "NYI");
151
+ };
152
+ virtual bool guard_bool(const char* file, int64_t line) {
153
+ TORCH_CHECK(false, "NYI");
154
+ };
155
+ virtual double guard_float(const char* file, int64_t line) {
156
+ TORCH_CHECK(false, "NYI");
157
+ };
158
+ virtual bool expect_true(const char* file, int64_t line) {
159
+ // No improvement for unbacked SymBools by default, replace this
160
+ // with a better implementation!
161
+ return guard_bool(file, line);
162
+ };
163
+ virtual bool expect_size(const char* file, int64_t line) {
164
+ // No improvement for unbacked SymInts by default, replace this
165
+ // with a better implementation!
166
+ return ge(wrap_int(0))->guard_bool(file, line);
167
+ };
168
+ virtual int64_t int_() {
169
+ TORCH_CHECK(false, "NYI");
170
+ };
171
+ virtual bool bool_() {
172
+ TORCH_CHECK(false, "NYI");
173
+ };
174
+ virtual bool has_hint() {
175
+ TORCH_CHECK(false, "NYI");
176
+ };
177
+ virtual std::string str() {
178
+ TORCH_CHECK(false, "NYI");
179
+ };
180
+ virtual c10::optional<int64_t> singleton_int() {
181
+ return c10::nullopt;
182
+ }
183
+ virtual c10::optional<int64_t> singleton_coeff() {
184
+ return c10::nullopt;
185
+ }
186
+ virtual c10::optional<int64_t> constant_int() {
187
+ return c10::nullopt;
188
+ }
189
+ virtual c10::optional<bool> constant_bool() {
190
+ return c10::nullopt;
191
+ }
192
+ virtual c10::optional<int64_t> maybe_as_int() {
193
+ return c10::nullopt;
194
+ }
195
+ virtual bool is_constant() {
196
+ return false;
197
+ }
198
+ virtual bool is_symbolic() {
199
+ return true;
200
+ }
201
+ std::ostream& operator<<(std::ostream& os) {
202
+ os << str();
203
+ return os;
204
+ }
205
+ };
206
+
207
+ } // namespace c10
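The interface above is mostly NYI virtuals for backends to override. As a hypothetical illustration (not the real ConstantSymNodeImpl referenced in the comment at the top of the header), a constant-integer node might override only the methods it can answer:

#include <c10/core/SymNodeImpl.h>
#include <string>

// Illustrative only: a node wrapping a known integer constant.
class MyConstIntNode : public c10::SymNodeImpl {
 public:
  explicit MyConstIntNode(int64_t v) : value_(v) {}
  bool is_int() override { return true; }
  bool is_bool() override { return false; }
  bool is_float() override { return false; }
  int64_t int_() override { return value_; }
  c10::optional<int64_t> constant_int() override { return value_; }
  c10::optional<int64_t> maybe_as_int() override { return value_; }
  bool is_constant() override { return true; }
  bool is_symbolic() override { return false; }
  std::string str() override { return std::to_string(value_); }

 private:
  int64_t value_;
};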
env-llmeval/lib/python3.10/site-packages/torch/include/c10/core/alignment.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+
5
+ namespace c10 {
6
+
7
+ #ifdef C10_MOBILE
8
+ // Use 16-byte alignment on mobile
9
+ // - ARM NEON AArch32 and AArch64
10
+ // - x86[-64] < AVX
11
+ constexpr size_t gAlignment = 16;
12
+ #else
13
+ // Use 64-byte alignment should be enough for computation up to AVX512.
14
+ constexpr size_t gAlignment = 64;
15
+ #endif
16
+
17
+ constexpr size_t gPagesize = 4096;
18
+ // since the default thp pagesize is 2MB, enable thp only
19
+ // for buffers of size 2MB or larger to avoid memory bloating
20
+ constexpr size_t gAlloc_threshold_thp = 2 * 1024 * 1024;
21
+ } // namespace c10
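A sketch of the alignment constant in use. This is not how c10's CPU allocator is actually implemented; posix_memalign is just an illustrative (POSIX-only) way to honor gAlignment.

#include <c10/core/alignment.h>
#include <cstdlib>

// Illustrative only: return a buffer aligned to c10::gAlignment, or nullptr.
void* alloc_aligned(std::size_t nbytes) {
  void* ptr = nullptr;
  if (posix_memalign(&ptr, c10::gAlignment, nbytes) != 0) {
    return nullptr;
  }
  return ptr;
}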
env-llmeval/lib/python3.10/site-packages/torch/include/clog.h ADDED
@@ -0,0 +1,100 @@
1
+ #pragma once
2
+
3
+ #include <stdarg.h>
4
+ #include <stdlib.h>
5
+ #include <inttypes.h>
6
+
7
+ #define CLOG_NONE 0
8
+ #define CLOG_FATAL 1
9
+ #define CLOG_ERROR 2
10
+ #define CLOG_WARNING 3
11
+ #define CLOG_INFO 4
12
+ #define CLOG_DEBUG 5
13
+
14
+ #ifndef CLOG_VISIBILITY
15
+ #if defined(__ELF__)
16
+ #define CLOG_VISIBILITY __attribute__((__visibility__("internal")))
17
+ #elif defined(__MACH__)
18
+ #define CLOG_VISIBILITY __attribute__((__visibility__("hidden")))
19
+ #else
20
+ #define CLOG_VISIBILITY
21
+ #endif
22
+ #endif
23
+
24
+ #ifndef CLOG_ARGUMENTS_FORMAT
25
+ #if defined(__GNUC__)
26
+ #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2)))
27
+ #else
28
+ #define CLOG_ARGUMENTS_FORMAT
29
+ #endif
30
+ #endif
31
+
32
+ #ifdef __cplusplus
33
+ extern "C" {
34
+ #endif
35
+
36
+ CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args);
37
+ CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args);
38
+ CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args);
39
+ CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args);
40
+ CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args);
41
+
42
+ #define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \
43
+ CLOG_ARGUMENTS_FORMAT \
44
+ inline static void log_debug_function_name(const char* format, ...) { \
45
+ if (level >= CLOG_DEBUG) { \
46
+ va_list args; \
47
+ va_start(args, format); \
48
+ clog_vlog_debug(module, format, args); \
49
+ va_end(args); \
50
+ } \
51
+ }
52
+
53
+ #define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \
54
+ CLOG_ARGUMENTS_FORMAT \
55
+ inline static void log_info_function_name(const char* format, ...) { \
56
+ if (level >= CLOG_INFO) { \
57
+ va_list args; \
58
+ va_start(args, format); \
59
+ clog_vlog_info(module, format, args); \
60
+ va_end(args); \
61
+ } \
62
+ }
63
+
64
+ #define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \
65
+ CLOG_ARGUMENTS_FORMAT \
66
+ inline static void log_warning_function_name(const char* format, ...) { \
67
+ if (level >= CLOG_WARNING) { \
68
+ va_list args; \
69
+ va_start(args, format); \
70
+ clog_vlog_warning(module, format, args); \
71
+ va_end(args); \
72
+ } \
73
+ }
74
+
75
+ #define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \
76
+ CLOG_ARGUMENTS_FORMAT \
77
+ inline static void log_error_function_name(const char* format, ...) { \
78
+ if (level >= CLOG_ERROR) { \
79
+ va_list args; \
80
+ va_start(args, format); \
81
+ clog_vlog_error(module, format, args); \
82
+ va_end(args); \
83
+ } \
84
+ }
85
+
86
+ #define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \
87
+ CLOG_ARGUMENTS_FORMAT \
88
+ inline static void log_fatal_function_name(const char* format, ...) { \
89
+ if (level >= CLOG_FATAL) { \
90
+ va_list args; \
91
+ va_start(args, format); \
92
+ clog_vlog_fatal(module, format, args); \
93
+ va_end(args); \
94
+ } \
95
+ abort(); \
96
+ }
97
+
98
+ #ifdef __cplusplus
99
+ } /* extern "C" */
100
+ #endif
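A sketch of how the CLOG_DEFINE_LOG_* macros above are meant to be instantiated; the module name, function name, and level chosen here are arbitrary, and the program must link against the clog library that provides clog_vlog_error.

#include <clog.h>

// Illustrative only: generates `static inline void my_log_error(const char*, ...)`
// which forwards to clog_vlog_error when the compile-time level allows it.
CLOG_DEFINE_LOG_ERROR(my_log_error, "mymodule", CLOG_DEBUG)

void report_failure(int code) {
  my_log_error("operation failed with code %d", code);
}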
env-llmeval/lib/python3.10/site-packages/torch/include/cpuinfo.h ADDED
@@ -0,0 +1,1939 @@
1
+ #pragma once
2
+ #ifndef CPUINFO_H
3
+ #define CPUINFO_H
4
+
5
+ #ifndef __cplusplus
6
+ #include <stdbool.h>
7
+ #endif
8
+
9
+ #ifdef __APPLE__
10
+ #include <TargetConditionals.h>
11
+ #endif
12
+
13
+ #include <stdint.h>
14
+
15
+ /* Identify architecture and define corresponding macro */
16
+
17
+ #if defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86)
18
+ #define CPUINFO_ARCH_X86 1
19
+ #endif
20
+
21
+ #if defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
22
+ #define CPUINFO_ARCH_X86_64 1
23
+ #endif
24
+
25
+ #if defined(__arm__) || defined(_M_ARM)
26
+ #define CPUINFO_ARCH_ARM 1
27
+ #endif
28
+
29
+ #if defined(__aarch64__) || defined(_M_ARM64)
30
+ #define CPUINFO_ARCH_ARM64 1
31
+ #endif
32
+
33
+ #if defined(__PPC64__) || defined(__powerpc64__) || defined(_ARCH_PPC64)
34
+ #define CPUINFO_ARCH_PPC64 1
35
+ #endif
36
+
37
+ #if defined(__asmjs__)
38
+ #define CPUINFO_ARCH_ASMJS 1
39
+ #endif
40
+
41
+ #if defined(__wasm__)
42
+ #if defined(__wasm_simd128__)
43
+ #define CPUINFO_ARCH_WASMSIMD 1
44
+ #else
45
+ #define CPUINFO_ARCH_WASM 1
46
+ #endif
47
+ #endif
48
+
49
+ /* Define other architecture-specific macros as 0 */
50
+
51
+ #ifndef CPUINFO_ARCH_X86
52
+ #define CPUINFO_ARCH_X86 0
53
+ #endif
54
+
55
+ #ifndef CPUINFO_ARCH_X86_64
56
+ #define CPUINFO_ARCH_X86_64 0
57
+ #endif
58
+
59
+ #ifndef CPUINFO_ARCH_ARM
60
+ #define CPUINFO_ARCH_ARM 0
61
+ #endif
62
+
63
+ #ifndef CPUINFO_ARCH_ARM64
64
+ #define CPUINFO_ARCH_ARM64 0
65
+ #endif
66
+
67
+ #ifndef CPUINFO_ARCH_PPC64
68
+ #define CPUINFO_ARCH_PPC64 0
69
+ #endif
70
+
71
+ #ifndef CPUINFO_ARCH_ASMJS
72
+ #define CPUINFO_ARCH_ASMJS 0
73
+ #endif
74
+
75
+ #ifndef CPUINFO_ARCH_WASM
76
+ #define CPUINFO_ARCH_WASM 0
77
+ #endif
78
+
79
+ #ifndef CPUINFO_ARCH_WASMSIMD
80
+ #define CPUINFO_ARCH_WASMSIMD 0
81
+ #endif
82
+
83
+ #if CPUINFO_ARCH_X86 && defined(_MSC_VER)
84
+ #define CPUINFO_ABI __cdecl
85
+ #elif CPUINFO_ARCH_X86 && defined(__GNUC__)
86
+ #define CPUINFO_ABI __attribute__((__cdecl__))
87
+ #else
88
+ #define CPUINFO_ABI
89
+ #endif
90
+
91
+ #define CPUINFO_CACHE_UNIFIED 0x00000001
92
+ #define CPUINFO_CACHE_INCLUSIVE 0x00000002
93
+ #define CPUINFO_CACHE_COMPLEX_INDEXING 0x00000004
94
+
95
+ struct cpuinfo_cache {
96
+ /** Cache size in bytes */
97
+ uint32_t size;
98
+ /** Number of ways of associativity */
99
+ uint32_t associativity;
100
+ /** Number of sets */
101
+ uint32_t sets;
102
+ /** Number of partitions */
103
+ uint32_t partitions;
104
+ /** Line size in bytes */
105
+ uint32_t line_size;
106
+ /**
107
+ * Binary characteristics of the cache (unified cache, inclusive cache, cache with complex indexing).
108
+ *
109
+ * @see CPUINFO_CACHE_UNIFIED, CPUINFO_CACHE_INCLUSIVE, CPUINFO_CACHE_COMPLEX_INDEXING
110
+ */
111
+ uint32_t flags;
112
+ /** Index of the first logical processor that shares this cache */
113
+ uint32_t processor_start;
114
+ /** Number of logical processors that share this cache */
115
+ uint32_t processor_count;
116
+ };
117
+
118
+ struct cpuinfo_trace_cache {
119
+ uint32_t uops;
120
+ uint32_t associativity;
121
+ };
122
+
123
+ #define CPUINFO_PAGE_SIZE_4KB 0x1000
124
+ #define CPUINFO_PAGE_SIZE_1MB 0x100000
125
+ #define CPUINFO_PAGE_SIZE_2MB 0x200000
126
+ #define CPUINFO_PAGE_SIZE_4MB 0x400000
127
+ #define CPUINFO_PAGE_SIZE_16MB 0x1000000
128
+ #define CPUINFO_PAGE_SIZE_1GB 0x40000000
129
+
130
+ struct cpuinfo_tlb {
131
+ uint32_t entries;
132
+ uint32_t associativity;
133
+ uint64_t pages;
134
+ };
135
+
136
+ /** Vendor of processor core design */
137
+ enum cpuinfo_vendor {
138
+ /** Processor vendor is not known to the library, or the library failed to get vendor information from the OS. */
139
+ cpuinfo_vendor_unknown = 0,
140
+
141
+ /* Active vendors of modern CPUs */
142
+
143
+ /**
144
+ * Intel Corporation. Vendor of x86, x86-64, IA64, and ARM processor microarchitectures.
145
+ *
146
+ * Sold its ARM design subsidiary in 2006. The last ARM processor design was released in 2004.
147
+ */
148
+ cpuinfo_vendor_intel = 1,
149
+ /** Advanced Micro Devices, Inc. Vendor of x86 and x86-64 processor microarchitectures. */
150
+ cpuinfo_vendor_amd = 2,
151
+ /** ARM Holdings plc. Vendor of ARM and ARM64 processor microarchitectures. */
152
+ cpuinfo_vendor_arm = 3,
153
+ /** Qualcomm Incorporated. Vendor of ARM and ARM64 processor microarchitectures. */
154
+ cpuinfo_vendor_qualcomm = 4,
155
+ /** Apple Inc. Vendor of ARM and ARM64 processor microarchitectures. */
156
+ cpuinfo_vendor_apple = 5,
157
+ /** Samsung Electronics Co., Ltd. Vendor of ARM64 processor microarchitectures. */
158
+ cpuinfo_vendor_samsung = 6,
159
+ /** Nvidia Corporation. Vendor of ARM64-compatible processor microarchitectures. */
160
+ cpuinfo_vendor_nvidia = 7,
161
+ /** MIPS Technologies, Inc. Vendor of MIPS processor microarchitectures. */
162
+ cpuinfo_vendor_mips = 8,
163
+ /** International Business Machines Corporation. Vendor of PowerPC processor microarchitectures. */
164
+ cpuinfo_vendor_ibm = 9,
165
+ /** Ingenic Semiconductor. Vendor of MIPS processor microarchitectures. */
166
+ cpuinfo_vendor_ingenic = 10,
167
+ /**
168
+ * VIA Technologies, Inc. Vendor of x86 and x86-64 processor microarchitectures.
169
+ *
170
+ * Processors are designed by Centaur Technology, a subsidiary of VIA Technologies.
171
+ */
172
+ cpuinfo_vendor_via = 11,
173
+ /** Cavium, Inc. Vendor of ARM64 processor microarchitectures. */
174
+ cpuinfo_vendor_cavium = 12,
175
+ /** Broadcom, Inc. Vendor of ARM processor microarchitectures. */
176
+ cpuinfo_vendor_broadcom = 13,
177
+ /** Applied Micro Circuits Corporation (APM). Vendor of ARM64 processor microarchitectures. */
178
+ cpuinfo_vendor_apm = 14,
179
+ /**
180
+ * Huawei Technologies Co., Ltd. Vendor of ARM64 processor microarchitectures.
181
+ *
182
+ * Processors are designed by HiSilicon, a subsidiary of Huawei.
183
+ */
184
+ cpuinfo_vendor_huawei = 15,
185
+ /**
186
+ * Hygon (Chengdu Haiguang Integrated Circuit Design Co., Ltd). Vendor of x86-64 processor microarchitectures.
187
+ *
188
+ * Processors are variants of AMD cores.
189
+ */
190
+ cpuinfo_vendor_hygon = 16,
191
+
192
+ /* Active vendors of embedded CPUs */
193
+
194
+ /** Texas Instruments Inc. Vendor of ARM processor microarchitectures. */
195
+ cpuinfo_vendor_texas_instruments = 30,
196
+ /** Marvell Technology Group Ltd. Vendor of ARM processor microarchitectures. */
197
+ cpuinfo_vendor_marvell = 31,
198
+ /** RDC Semiconductor Co., Ltd. Vendor of x86 processor microarchitectures. */
199
+ cpuinfo_vendor_rdc = 32,
200
+ /** DM&P Electronics Inc. Vendor of x86 processor microarchitectures. */
201
+ cpuinfo_vendor_dmp = 33,
202
+ /** Motorola, Inc. Vendor of PowerPC and ARM processor microarchitectures. */
203
+ cpuinfo_vendor_motorola = 34,
204
+
205
+ /* Defunct CPU vendors */
206
+
207
+ /**
208
+ * Transmeta Corporation. Vendor of x86 processor microarchitectures.
209
+ *
210
+ * Now defunct. The last processor design was released in 2004.
211
+ * Transmeta processors implemented VLIW ISA and used binary translation to execute x86 code.
212
+ */
213
+ cpuinfo_vendor_transmeta = 50,
214
+ /**
215
+ * Cyrix Corporation. Vendor of x86 processor microarchitectures.
216
+ *
217
+ * Now defunct. The last processor design was released in 1996.
218
+ */
219
+ cpuinfo_vendor_cyrix = 51,
220
+ /**
221
+ * Rise Technology. Vendor of x86 processor microarchitectures.
222
+ *
223
+ * Now defunct. The last processor design was released in 1999.
224
+ */
225
+ cpuinfo_vendor_rise = 52,
226
+ /**
227
+ * National Semiconductor. Vendor of x86 processor microarchitectures.
228
+ *
229
+ * Sold its x86 design subsidiary in 1999. The last processor design was released in 1998.
230
+ */
231
+ cpuinfo_vendor_nsc = 53,
232
+ /**
233
+ * Silicon Integrated Systems. Vendor of x86 processor microarchitectures.
234
+ *
235
+ * Sold its x86 design subsidiary in 2001. The last processor design was released in 2001.
236
+ */
237
+ cpuinfo_vendor_sis = 54,
238
+ /**
239
+ * NexGen. Vendor of x86 processor microarchitectures.
240
+ *
241
+ * Now defunct. The last processor design was released in 1994.
242
+ * NexGen designed the first x86 microarchitecture which decomposed x86 instructions into simple microoperations.
243
+ */
244
+ cpuinfo_vendor_nexgen = 55,
245
+ /**
246
+ * United Microelectronics Corporation. Vendor of x86 processor microarchitectures.
247
+ *
248
+ * Ceased x86 in the early 1990s. The last processor design was released in 1991.
249
+ * Designed U5C and U5D processors. Both are 486 level.
250
+ */
251
+ cpuinfo_vendor_umc = 56,
252
+ /**
253
+ * Digital Equipment Corporation. Vendor of ARM processor microarchitecture.
254
+ *
255
+ * Sold its ARM designs in 1997. The last processor design was released in 1997.
256
+ */
257
+ cpuinfo_vendor_dec = 57,
258
+ };
259
+
260
+ /**
261
+ * Processor microarchitecture
262
+ *
263
+ * Processors with different microarchitectures often have different instruction performance characteristics,
264
+ * and may have dramatically different pipeline organization.
265
+ */
266
+ enum cpuinfo_uarch {
267
+ /** Microarchitecture is unknown, or the library failed to get information about the microarchitecture from OS */
268
+ cpuinfo_uarch_unknown = 0,
269
+
270
+ /** Pentium and Pentium MMX microarchitecture. */
271
+ cpuinfo_uarch_p5 = 0x00100100,
272
+ /** Intel Quark microarchitecture. */
273
+ cpuinfo_uarch_quark = 0x00100101,
274
+
275
+ /** Pentium Pro, Pentium II, and Pentium III. */
276
+ cpuinfo_uarch_p6 = 0x00100200,
277
+ /** Pentium M. */
278
+ cpuinfo_uarch_dothan = 0x00100201,
279
+ /** Intel Core microarchitecture. */
280
+ cpuinfo_uarch_yonah = 0x00100202,
281
+ /** Intel Core 2 microarchitecture on 65 nm process. */
282
+ cpuinfo_uarch_conroe = 0x00100203,
283
+ /** Intel Core 2 microarchitecture on 45 nm process. */
284
+ cpuinfo_uarch_penryn = 0x00100204,
285
+ /** Intel Nehalem and Westmere microarchitectures (Core i3/i5/i7 1st gen). */
286
+ cpuinfo_uarch_nehalem = 0x00100205,
287
+ /** Intel Sandy Bridge microarchitecture (Core i3/i5/i7 2nd gen). */
288
+ cpuinfo_uarch_sandy_bridge = 0x00100206,
289
+ /** Intel Ivy Bridge microarchitecture (Core i3/i5/i7 3rd gen). */
290
+ cpuinfo_uarch_ivy_bridge = 0x00100207,
291
+ /** Intel Haswell microarchitecture (Core i3/i5/i7 4th gen). */
292
+ cpuinfo_uarch_haswell = 0x00100208,
293
+ /** Intel Broadwell microarchitecture. */
294
+ cpuinfo_uarch_broadwell = 0x00100209,
295
+ /** Intel Sky Lake microarchitecture (14 nm, including Kaby/Coffee/Whiskey/Amber/Comet/Cascade/Cooper Lake). */
296
+ cpuinfo_uarch_sky_lake = 0x0010020A,
297
+ /** DEPRECATED (Intel Kaby Lake microarchitecture). */
298
+ cpuinfo_uarch_kaby_lake = 0x0010020A,
299
+ /** Intel Palm Cove microarchitecture (10 nm, Cannon Lake). */
300
+ cpuinfo_uarch_palm_cove = 0x0010020B,
301
+ /** Intel Sunny Cove microarchitecture (10 nm, Ice Lake). */
302
+ cpuinfo_uarch_sunny_cove = 0x0010020C,
303
+
304
+ /** Pentium 4 with Willamette, Northwood, or Foster cores. */
305
+ cpuinfo_uarch_willamette = 0x00100300,
306
+ /** Pentium 4 with Prescott and later cores. */
307
+ cpuinfo_uarch_prescott = 0x00100301,
308
+
309
+ /** Intel Atom on 45 nm process. */
310
+ cpuinfo_uarch_bonnell = 0x00100400,
311
+ /** Intel Atom on 32 nm process. */
312
+ cpuinfo_uarch_saltwell = 0x00100401,
313
+ /** Intel Silvermont microarchitecture (22 nm out-of-order Atom). */
314
+ cpuinfo_uarch_silvermont = 0x00100402,
315
+ /** Intel Airmont microarchitecture (14 nm out-of-order Atom). */
316
+ cpuinfo_uarch_airmont = 0x00100403,
317
+ /** Intel Goldmont microarchitecture (Denverton, Apollo Lake). */
318
+ cpuinfo_uarch_goldmont = 0x00100404,
319
+ /** Intel Goldmont Plus microarchitecture (Gemini Lake). */
320
+ cpuinfo_uarch_goldmont_plus = 0x00100405,
321
+
322
+ /** Intel Knights Ferry HPC boards. */
323
+ cpuinfo_uarch_knights_ferry = 0x00100500,
324
+ /** Intel Knights Corner HPC boards (aka Xeon Phi). */
325
+ cpuinfo_uarch_knights_corner = 0x00100501,
326
+ /** Intel Knights Landing microarchitecture (second-gen MIC). */
327
+ cpuinfo_uarch_knights_landing = 0x00100502,
328
+ /** Intel Knights Hill microarchitecture (third-gen MIC). */
329
+ cpuinfo_uarch_knights_hill = 0x00100503,
330
+ /** Intel Knights Mill Xeon Phi. */
331
+ cpuinfo_uarch_knights_mill = 0x00100504,
332
+
333
+ /** Intel/Marvell XScale series. */
334
+ cpuinfo_uarch_xscale = 0x00100600,
335
+
336
+ /** AMD K5. */
337
+ cpuinfo_uarch_k5 = 0x00200100,
338
+ /** AMD K6 and alike. */
339
+ cpuinfo_uarch_k6 = 0x00200101,
340
+ /** AMD Athlon and Duron. */
341
+ cpuinfo_uarch_k7 = 0x00200102,
342
+ /** AMD Athlon 64, Opteron 64. */
343
+ cpuinfo_uarch_k8 = 0x00200103,
344
+ /** AMD Family 10h (Barcelona, Istanbul, Magny-Cours). */
345
+ cpuinfo_uarch_k10 = 0x00200104,
346
+ /**
347
+ * AMD Bulldozer microarchitecture
348
+ * Zambezi FX-series CPUs, Zurich, Valencia and Interlagos Opteron CPUs.
349
+ */
350
+ cpuinfo_uarch_bulldozer = 0x00200105,
351
+ /**
352
+ * AMD Piledriver microarchitecture
353
+ * Vishera FX-series CPUs, Trinity and Richland APUs, Delhi, Seoul, Abu Dhabi Opteron CPUs.
354
+ */
355
+ cpuinfo_uarch_piledriver = 0x00200106,
356
+ /** AMD Steamroller microarchitecture (Kaveri APUs). */
357
+ cpuinfo_uarch_steamroller = 0x00200107,
358
+ /** AMD Excavator microarchitecture (Carrizo APUs). */
359
+ cpuinfo_uarch_excavator = 0x00200108,
360
+ /** AMD Zen microarchitecture (12/14 nm Ryzen and EPYC CPUs). */
361
+ cpuinfo_uarch_zen = 0x00200109,
362
+ /** AMD Zen 2 microarchitecture (7 nm Ryzen and EPYC CPUs). */
363
+ cpuinfo_uarch_zen2 = 0x0020010A,
364
+ /** AMD Zen 3 microarchitecture. */
365
+ cpuinfo_uarch_zen3 = 0x0020010B,
366
+
367
+ /** NSC Geode and AMD Geode GX and LX. */
368
+ cpuinfo_uarch_geode = 0x00200200,
369
+ /** AMD Bobcat mobile microarchitecture. */
370
+ cpuinfo_uarch_bobcat = 0x00200201,
371
+ /** AMD Jaguar mobile microarchitecture. */
372
+ cpuinfo_uarch_jaguar = 0x00200202,
373
+ /** AMD Puma mobile microarchitecture. */
374
+ cpuinfo_uarch_puma = 0x00200203,
375
+
376
+ /** ARM7 series. */
377
+ cpuinfo_uarch_arm7 = 0x00300100,
378
+ /** ARM9 series. */
379
+ cpuinfo_uarch_arm9 = 0x00300101,
380
+ /** ARM 1136, ARM 1156, ARM 1176, or ARM 11MPCore. */
381
+ cpuinfo_uarch_arm11 = 0x00300102,
382
+
383
+ /** ARM Cortex-A5. */
384
+ cpuinfo_uarch_cortex_a5 = 0x00300205,
385
+ /** ARM Cortex-A7. */
386
+ cpuinfo_uarch_cortex_a7 = 0x00300207,
387
+ /** ARM Cortex-A8. */
388
+ cpuinfo_uarch_cortex_a8 = 0x00300208,
389
+ /** ARM Cortex-A9. */
390
+ cpuinfo_uarch_cortex_a9 = 0x00300209,
391
+ /** ARM Cortex-A12. */
392
+ cpuinfo_uarch_cortex_a12 = 0x00300212,
393
+ /** ARM Cortex-A15. */
394
+ cpuinfo_uarch_cortex_a15 = 0x00300215,
395
+ /** ARM Cortex-A17. */
396
+ cpuinfo_uarch_cortex_a17 = 0x00300217,
397
+
398
+ /** ARM Cortex-A32. */
399
+ cpuinfo_uarch_cortex_a32 = 0x00300332,
400
+ /** ARM Cortex-A35. */
401
+ cpuinfo_uarch_cortex_a35 = 0x00300335,
402
+ /** ARM Cortex-A53. */
403
+ cpuinfo_uarch_cortex_a53 = 0x00300353,
404
+ /** ARM Cortex-A55 revision 0 (restricted dual-issue capabilities compared to revision 1+). */
405
+ cpuinfo_uarch_cortex_a55r0 = 0x00300354,
406
+ /** ARM Cortex-A55. */
407
+ cpuinfo_uarch_cortex_a55 = 0x00300355,
408
+ /** ARM Cortex-A57. */
409
+ cpuinfo_uarch_cortex_a57 = 0x00300357,
410
+ /** ARM Cortex-A65. */
411
+ cpuinfo_uarch_cortex_a65 = 0x00300365,
412
+ /** ARM Cortex-A72. */
413
+ cpuinfo_uarch_cortex_a72 = 0x00300372,
414
+ /** ARM Cortex-A73. */
415
+ cpuinfo_uarch_cortex_a73 = 0x00300373,
416
+ /** ARM Cortex-A75. */
417
+ cpuinfo_uarch_cortex_a75 = 0x00300375,
418
+ /** ARM Cortex-A76. */
419
+ cpuinfo_uarch_cortex_a76 = 0x00300376,
420
+ /** ARM Cortex-A77. */
421
+ cpuinfo_uarch_cortex_a77 = 0x00300377,
422
+ /** ARM Cortex-A78. */
423
+ cpuinfo_uarch_cortex_a78 = 0x00300378,
424
+
425
+ /** ARM Neoverse N1. */
426
+ cpuinfo_uarch_neoverse_n1 = 0x00300400,
427
+ /** ARM Neoverse E1. */
428
+ cpuinfo_uarch_neoverse_e1 = 0x00300401,
429
+ /** ARM Neoverse V1. */
430
+ cpuinfo_uarch_neoverse_v1 = 0x00300402,
431
+ /** ARM Neoverse N2. */
432
+ cpuinfo_uarch_neoverse_n2 = 0x00300403,
433
+
434
+ /** ARM Cortex-X1. */
435
+ cpuinfo_uarch_cortex_x1 = 0x00300501,
436
+ /** ARM Cortex-X2. */
437
+ cpuinfo_uarch_cortex_x2 = 0x00300502,
438
+
439
+ /** ARM Cortex-A510. */
440
+ cpuinfo_uarch_cortex_a510 = 0x00300551,
441
+ /** ARM Cortex-A710. */
442
+ cpuinfo_uarch_cortex_a710 = 0x00300571,
443
+
444
+ /** Qualcomm Scorpion. */
445
+ cpuinfo_uarch_scorpion = 0x00400100,
446
+ /** Qualcomm Krait. */
447
+ cpuinfo_uarch_krait = 0x00400101,
448
+ /** Qualcomm Kryo. */
449
+ cpuinfo_uarch_kryo = 0x00400102,
450
+ /** Qualcomm Falkor. */
451
+ cpuinfo_uarch_falkor = 0x00400103,
452
+ /** Qualcomm Saphira. */
453
+ cpuinfo_uarch_saphira = 0x00400104,
454
+
455
+ /** Nvidia Denver. */
456
+ cpuinfo_uarch_denver = 0x00500100,
457
+ /** Nvidia Denver 2. */
458
+ cpuinfo_uarch_denver2 = 0x00500101,
459
+ /** Nvidia Carmel. */
460
+ cpuinfo_uarch_carmel = 0x00500102,
461
+
462
+ /** Samsung Exynos M1 (Exynos 8890 big cores). */
463
+ cpuinfo_uarch_exynos_m1 = 0x00600100,
464
+ /** Samsung Exynos M2 (Exynos 8895 big cores). */
465
+ cpuinfo_uarch_exynos_m2 = 0x00600101,
466
+ /** Samsung Exynos M3 (Exynos 9810 big cores). */
467
+ cpuinfo_uarch_exynos_m3 = 0x00600102,
468
+ /** Samsung Exynos M4 (Exynos 9820 big cores). */
469
+ cpuinfo_uarch_exynos_m4 = 0x00600103,
470
+ /** Samsung Exynos M5 (Exynos 9830 big cores). */
471
+ cpuinfo_uarch_exynos_m5 = 0x00600104,
472
+
473
+ /* Deprecated synonym for Cortex-A76 */
474
+ cpuinfo_uarch_cortex_a76ae = 0x00300376,
475
+ /* Deprecated names for Exynos. */
476
+ cpuinfo_uarch_mongoose_m1 = 0x00600100,
477
+ cpuinfo_uarch_mongoose_m2 = 0x00600101,
478
+ cpuinfo_uarch_meerkat_m3 = 0x00600102,
479
+ cpuinfo_uarch_meerkat_m4 = 0x00600103,
480
+
481
+ /** Apple A6 and A6X processors. */
482
+ cpuinfo_uarch_swift = 0x00700100,
483
+ /** Apple A7 processor. */
484
+ cpuinfo_uarch_cyclone = 0x00700101,
485
+ /** Apple A8 and A8X processor. */
486
+ cpuinfo_uarch_typhoon = 0x00700102,
487
+ /** Apple A9 and A9X processor. */
488
+ cpuinfo_uarch_twister = 0x00700103,
489
+ /** Apple A10 and A10X processor. */
490
+ cpuinfo_uarch_hurricane = 0x00700104,
491
+ /** Apple A11 processor (big cores). */
492
+ cpuinfo_uarch_monsoon = 0x00700105,
493
+ /** Apple A11 processor (little cores). */
494
+ cpuinfo_uarch_mistral = 0x00700106,
495
+ /** Apple A12 processor (big cores). */
496
+ cpuinfo_uarch_vortex = 0x00700107,
497
+ /** Apple A12 processor (little cores). */
498
+ cpuinfo_uarch_tempest = 0x00700108,
499
+ /** Apple A13 processor (big cores). */
500
+ cpuinfo_uarch_lightning = 0x00700109,
501
+ /** Apple A13 processor (little cores). */
502
+ cpuinfo_uarch_thunder = 0x0070010A,
503
+ /** Apple A14 / M1 processor (big cores). */
504
+ cpuinfo_uarch_firestorm = 0x0070010B,
505
+ /** Apple A14 / M1 processor (little cores). */
506
+ cpuinfo_uarch_icestorm = 0x0070010C,
507
+ /** Apple A15 / M2 processor (big cores). */
508
+ cpuinfo_uarch_avalanche = 0x0070010D,
509
+ /** Apple A15 / M2 processor (little cores). */
510
+ cpuinfo_uarch_blizzard = 0x0070010E,
511
+
512
+ /** Cavium ThunderX. */
513
+ cpuinfo_uarch_thunderx = 0x00800100,
514
+ /** Cavium ThunderX2 (originally Broadcom Vulkan). */
515
+ cpuinfo_uarch_thunderx2 = 0x00800200,
516
+
517
+ /** Marvell PJ4. */
518
+ cpuinfo_uarch_pj4 = 0x00900100,
519
+
520
+ /** Broadcom Brahma B15. */
521
+ cpuinfo_uarch_brahma_b15 = 0x00A00100,
522
+ /** Broadcom Brahma B53. */
523
+ cpuinfo_uarch_brahma_b53 = 0x00A00101,
524
+
525
+ /** Applied Micro X-Gene. */
526
+ cpuinfo_uarch_xgene = 0x00B00100,
527
+
528
+ /* Hygon Dhyana (a modification of AMD Zen for Chinese market). */
529
+ cpuinfo_uarch_dhyana = 0x01000100,
530
+
531
+ /** HiSilicon TaiShan v110 (Huawei Kunpeng 920 series processors). */
532
+ cpuinfo_uarch_taishan_v110 = 0x00C00100,
533
+ };
534
+
535
+ struct cpuinfo_processor {
536
+ /** SMT (hyperthread) ID within a core */
537
+ uint32_t smt_id;
538
+ /** Core containing this logical processor */
539
+ const struct cpuinfo_core* core;
540
+ /** Cluster of cores containing this logical processor */
541
+ const struct cpuinfo_cluster* cluster;
542
+ /** Physical package containing this logical processor */
543
+ const struct cpuinfo_package* package;
544
+ #if defined(__linux__)
545
+ /**
546
+ * Linux-specific ID for the logical processor:
547
+ * - Linux kernel exposes information about this logical processor in /sys/devices/system/cpu/cpu<linux_id>/
548
+ * - Bit <linux_id> in the cpu_set_t identifies this logical processor
549
+ */
550
+ int linux_id;
551
+ #endif
552
+ #if defined(_WIN32) || defined(__CYGWIN__)
553
+ /** Windows-specific ID for the group containing the logical processor. */
554
+ uint16_t windows_group_id;
555
+ /**
556
+ * Windows-specific ID of the logical processor within its group:
557
+ * - Bit <windows_processor_id> in the KAFFINITY mask identifies this logical processor within its group.
558
+ */
559
+ uint16_t windows_processor_id;
560
+ #endif
561
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
562
+ /** APIC ID (unique x86-specific ID of the logical processor) */
563
+ uint32_t apic_id;
564
+ #endif
565
+ struct {
566
+ /** Level 1 instruction cache */
567
+ const struct cpuinfo_cache* l1i;
568
+ /** Level 1 data cache */
569
+ const struct cpuinfo_cache* l1d;
570
+ /** Level 2 unified or data cache */
571
+ const struct cpuinfo_cache* l2;
572
+ /** Level 3 unified or data cache */
573
+ const struct cpuinfo_cache* l3;
574
+ /** Level 4 unified or data cache */
575
+ const struct cpuinfo_cache* l4;
576
+ } cache;
577
+ };
578
+
579
+ struct cpuinfo_core {
580
+ /** Index of the first logical processor on this core. */
581
+ uint32_t processor_start;
582
+ /** Number of logical processors on this core */
583
+ uint32_t processor_count;
584
+ /** Core ID within a package */
585
+ uint32_t core_id;
586
+ /** Cluster containing this core */
587
+ const struct cpuinfo_cluster* cluster;
588
+ /** Physical package containing this core. */
589
+ const struct cpuinfo_package* package;
590
+ /** Vendor of the CPU microarchitecture for this core */
591
+ enum cpuinfo_vendor vendor;
592
+ /** CPU microarchitecture for this core */
593
+ enum cpuinfo_uarch uarch;
594
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
595
+ /** Value of CPUID leaf 1 EAX register for this core */
596
+ uint32_t cpuid;
597
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
598
+ /** Value of Main ID Register (MIDR) for this core */
599
+ uint32_t midr;
600
+ #endif
601
+ /** Clock rate (non-Turbo) of the core, in Hz */
602
+ uint64_t frequency;
603
+ };
604
+
605
+ struct cpuinfo_cluster {
606
+ /** Index of the first logical processor in the cluster */
607
+ uint32_t processor_start;
608
+ /** Number of logical processors in the cluster */
609
+ uint32_t processor_count;
610
+ /** Index of the first core in the cluster */
611
+ uint32_t core_start;
612
+ /** Number of cores on the cluster */
613
+ uint32_t core_count;
614
+ /** Cluster ID within a package */
615
+ uint32_t cluster_id;
616
+ /** Physical package containing the cluster */
617
+ const struct cpuinfo_package* package;
618
+ /** CPU microarchitecture vendor of the cores in the cluster */
619
+ enum cpuinfo_vendor vendor;
620
+ /** CPU microarchitecture of the cores in the cluster */
621
+ enum cpuinfo_uarch uarch;
622
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
623
+ /** Value of CPUID leaf 1 EAX register of the cores in the cluster */
624
+ uint32_t cpuid;
625
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
626
+ /** Value of Main ID Register (MIDR) of the cores in the cluster */
627
+ uint32_t midr;
628
+ #endif
629
+ /** Clock rate (non-Turbo) of the cores in the cluster, in Hz */
630
+ uint64_t frequency;
631
+ };
632
+
633
+ #define CPUINFO_PACKAGE_NAME_MAX 48
634
+
635
+ struct cpuinfo_package {
636
+ /** SoC or processor chip model name */
637
+ char name[CPUINFO_PACKAGE_NAME_MAX];
638
+ /** Index of the first logical processor on this physical package */
639
+ uint32_t processor_start;
640
+ /** Number of logical processors on this physical package */
641
+ uint32_t processor_count;
642
+ /** Index of the first core on this physical package */
643
+ uint32_t core_start;
644
+ /** Number of cores on this physical package */
645
+ uint32_t core_count;
646
+ /** Index of the first cluster of cores on this physical package */
647
+ uint32_t cluster_start;
648
+ /** Number of clusters of cores on this physical package */
649
+ uint32_t cluster_count;
650
+ };
651
+
652
+ struct cpuinfo_uarch_info {
653
+ /** Type of CPU microarchitecture */
654
+ enum cpuinfo_uarch uarch;
655
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
656
+ /** Value of CPUID leaf 1 EAX register for the microarchitecture */
657
+ uint32_t cpuid;
658
+ #elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
659
+ /** Value of Main ID Register (MIDR) for the microarchitecture */
660
+ uint32_t midr;
661
+ #endif
662
+ /** Number of logical processors with the microarchitecture */
663
+ uint32_t processor_count;
664
+ /** Number of cores with the microarchitecture */
665
+ uint32_t core_count;
666
+ };
667
+
668
+ #ifdef __cplusplus
669
+ extern "C" {
670
+ #endif
671
+
672
+ bool CPUINFO_ABI cpuinfo_initialize(void);
673
+
674
+ void CPUINFO_ABI cpuinfo_deinitialize(void);
675
+
676
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
677
+ /* This structure is not a part of stable API. Use cpuinfo_has_x86_* functions instead. */
678
+ struct cpuinfo_x86_isa {
679
+ #if CPUINFO_ARCH_X86
680
+ bool rdtsc;
681
+ #endif
682
+ bool rdtscp;
683
+ bool rdpid;
684
+ bool sysenter;
685
+ #if CPUINFO_ARCH_X86
686
+ bool syscall;
687
+ #endif
688
+ bool msr;
689
+ bool clzero;
690
+ bool clflush;
691
+ bool clflushopt;
692
+ bool mwait;
693
+ bool mwaitx;
694
+ #if CPUINFO_ARCH_X86
695
+ bool emmx;
696
+ #endif
697
+ bool fxsave;
698
+ bool xsave;
699
+ #if CPUINFO_ARCH_X86
700
+ bool fpu;
701
+ bool mmx;
702
+ bool mmx_plus;
703
+ #endif
704
+ bool three_d_now;
705
+ bool three_d_now_plus;
706
+ #if CPUINFO_ARCH_X86
707
+ bool three_d_now_geode;
708
+ #endif
709
+ bool prefetch;
710
+ bool prefetchw;
711
+ bool prefetchwt1;
712
+ #if CPUINFO_ARCH_X86
713
+ bool daz;
714
+ bool sse;
715
+ bool sse2;
716
+ #endif
717
+ bool sse3;
718
+ bool ssse3;
719
+ bool sse4_1;
720
+ bool sse4_2;
721
+ bool sse4a;
722
+ bool misaligned_sse;
723
+ bool avx;
724
+ bool fma3;
725
+ bool fma4;
726
+ bool xop;
727
+ bool f16c;
728
+ bool avx2;
729
+ bool avx512f;
730
+ bool avx512pf;
731
+ bool avx512er;
732
+ bool avx512cd;
733
+ bool avx512dq;
734
+ bool avx512bw;
735
+ bool avx512vl;
736
+ bool avx512ifma;
737
+ bool avx512vbmi;
738
+ bool avx512vbmi2;
739
+ bool avx512bitalg;
740
+ bool avx512vpopcntdq;
741
+ bool avx512vnni;
742
+ bool avx512bf16;
743
+ bool avx512fp16;
744
+ bool avx512vp2intersect;
745
+ bool avx512_4vnniw;
746
+ bool avx512_4fmaps;
747
+ bool hle;
748
+ bool rtm;
749
+ bool xtest;
750
+ bool mpx;
751
+ #if CPUINFO_ARCH_X86
752
+ bool cmov;
753
+ bool cmpxchg8b;
754
+ #endif
755
+ bool cmpxchg16b;
756
+ bool clwb;
757
+ bool movbe;
758
+ #if CPUINFO_ARCH_X86_64
759
+ bool lahf_sahf;
760
+ #endif
761
+ bool fs_gs_base;
762
+ bool lzcnt;
763
+ bool popcnt;
764
+ bool tbm;
765
+ bool bmi;
766
+ bool bmi2;
767
+ bool adx;
768
+ bool aes;
769
+ bool vaes;
770
+ bool pclmulqdq;
771
+ bool vpclmulqdq;
772
+ bool gfni;
773
+ bool rdrand;
774
+ bool rdseed;
775
+ bool sha;
776
+ bool rng;
777
+ bool ace;
778
+ bool ace2;
779
+ bool phe;
780
+ bool pmm;
781
+ bool lwp;
782
+ };
783
+
784
+ extern struct cpuinfo_x86_isa cpuinfo_isa;
785
+ #endif
786
+
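A minimal sketch of initializing the library and querying one of the cpuinfo_has_x86_* helpers declared below; the program structure is illustrative.

#include <cpuinfo.h>
#include <stdio.h>

// Illustrative only: cpuinfo must be initialized before any query helpers are used.
int main(void) {
  if (!cpuinfo_initialize()) {
    return 1;
  }
  printf("AVX2 supported: %d\n", cpuinfo_has_x86_avx2() ? 1 : 0);
  cpuinfo_deinitialize();
  return 0;
}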
787
+ static inline bool cpuinfo_has_x86_rdtsc(void) {
788
+ #if CPUINFO_ARCH_X86_64
789
+ return true;
790
+ #elif CPUINFO_ARCH_X86
791
+ #if defined(__ANDROID__)
792
+ return true;
793
+ #else
794
+ return cpuinfo_isa.rdtsc;
795
+ #endif
796
+ #else
797
+ return false;
798
+ #endif
799
+ }
800
+
801
+ static inline bool cpuinfo_has_x86_rdtscp(void) {
802
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
803
+ return cpuinfo_isa.rdtscp;
804
+ #else
805
+ return false;
806
+ #endif
807
+ }
808
+
809
+ static inline bool cpuinfo_has_x86_rdpid(void) {
810
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
811
+ return cpuinfo_isa.rdpid;
812
+ #else
813
+ return false;
814
+ #endif
815
+ }
816
+
817
+ static inline bool cpuinfo_has_x86_clzero(void) {
818
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
819
+ return cpuinfo_isa.clzero;
820
+ #else
821
+ return false;
822
+ #endif
823
+ }
824
+
825
+ static inline bool cpuinfo_has_x86_mwait(void) {
826
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
827
+ return cpuinfo_isa.mwait;
828
+ #else
829
+ return false;
830
+ #endif
831
+ }
832
+
833
+ static inline bool cpuinfo_has_x86_mwaitx(void) {
834
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
835
+ return cpuinfo_isa.mwaitx;
836
+ #else
837
+ return false;
838
+ #endif
839
+ }
840
+
841
+ static inline bool cpuinfo_has_x86_fxsave(void) {
842
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
843
+ return cpuinfo_isa.fxsave;
844
+ #else
845
+ return false;
846
+ #endif
847
+ }
848
+
849
+ static inline bool cpuinfo_has_x86_xsave(void) {
850
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
851
+ return cpuinfo_isa.xsave;
852
+ #else
853
+ return false;
854
+ #endif
855
+ }
856
+
857
+ static inline bool cpuinfo_has_x86_fpu(void) {
858
+ #if CPUINFO_ARCH_X86_64
859
+ return true;
860
+ #elif CPUINFO_ARCH_X86
861
+ #if defined(__ANDROID__)
862
+ return true;
863
+ #else
864
+ return cpuinfo_isa.fpu;
865
+ #endif
866
+ #else
867
+ return false;
868
+ #endif
869
+ }
870
+
871
+ static inline bool cpuinfo_has_x86_mmx(void) {
872
+ #if CPUINFO_ARCH_X86_64
873
+ return true;
874
+ #elif CPUINFO_ARCH_X86
875
+ #if defined(__ANDROID__)
876
+ return true;
877
+ #else
878
+ return cpuinfo_isa.mmx;
879
+ #endif
880
+ #else
881
+ return false;
882
+ #endif
883
+ }
884
+
885
+ static inline bool cpuinfo_has_x86_mmx_plus(void) {
886
+ #if CPUINFO_ARCH_X86_64
887
+ return true;
888
+ #elif CPUINFO_ARCH_X86
889
+ #if defined(__ANDROID__)
890
+ return true;
891
+ #else
892
+ return cpuinfo_isa.mmx_plus;
893
+ #endif
894
+ #else
895
+ return false;
896
+ #endif
897
+ }
898
+
899
+ static inline bool cpuinfo_has_x86_3dnow(void) {
900
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
901
+ return cpuinfo_isa.three_d_now;
902
+ #else
903
+ return false;
904
+ #endif
905
+ }
906
+
907
+ static inline bool cpuinfo_has_x86_3dnow_plus(void) {
908
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
909
+ return cpuinfo_isa.three_d_now_plus;
910
+ #else
911
+ return false;
912
+ #endif
913
+ }
914
+
915
+ static inline bool cpuinfo_has_x86_3dnow_geode(void) {
916
+ #if CPUINFO_ARCH_X86_64
917
+ return false;
918
+ #elif CPUINFO_ARCH_X86
919
+ #if defined(__ANDROID__)
920
+ return false;
921
+ #else
922
+ return cpuinfo_isa.three_d_now_geode;
923
+ #endif
924
+ #else
925
+ return false;
926
+ #endif
927
+ }
928
+
929
+ static inline bool cpuinfo_has_x86_prefetch(void) {
930
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
931
+ return cpuinfo_isa.prefetch;
932
+ #else
933
+ return false;
934
+ #endif
935
+ }
936
+
937
+ static inline bool cpuinfo_has_x86_prefetchw(void) {
938
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
939
+ return cpuinfo_isa.prefetchw;
940
+ #else
941
+ return false;
942
+ #endif
943
+ }
944
+
945
+ static inline bool cpuinfo_has_x86_prefetchwt1(void) {
946
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
947
+ return cpuinfo_isa.prefetchwt1;
948
+ #else
949
+ return false;
950
+ #endif
951
+ }
952
+
953
+ static inline bool cpuinfo_has_x86_daz(void) {
954
+ #if CPUINFO_ARCH_X86_64
955
+ return true;
956
+ #elif CPUINFO_ARCH_X86
957
+ #if defined(__ANDROID__)
958
+ return true;
959
+ #else
960
+ return cpuinfo_isa.daz;
961
+ #endif
962
+ #else
963
+ return false;
964
+ #endif
965
+ }
966
+
967
+ static inline bool cpuinfo_has_x86_sse(void) {
968
+ #if CPUINFO_ARCH_X86_64
969
+ return true;
970
+ #elif CPUINFO_ARCH_X86
971
+ #if defined(__ANDROID__)
972
+ return true;
973
+ #else
974
+ return cpuinfo_isa.sse;
975
+ #endif
976
+ #else
977
+ return false;
978
+ #endif
979
+ }
980
+
981
+ static inline bool cpuinfo_has_x86_sse2(void) {
982
+ #if CPUINFO_ARCH_X86_64
983
+ return true;
984
+ #elif CPUINFO_ARCH_X86
985
+ #if defined(__ANDROID__)
986
+ return true;
987
+ #else
988
+ return cpuinfo_isa.sse2;
989
+ #endif
990
+ #else
991
+ return false;
992
+ #endif
993
+ }
994
+
995
+ static inline bool cpuinfo_has_x86_sse3(void) {
996
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
997
+ #if defined(__ANDROID__)
998
+ return true;
999
+ #else
1000
+ return cpuinfo_isa.sse3;
1001
+ #endif
1002
+ #else
1003
+ return false;
1004
+ #endif
1005
+ }
1006
+
1007
+ static inline bool cpuinfo_has_x86_ssse3(void) {
1008
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1009
+ #if defined(__ANDROID__)
1010
+ return true;
1011
+ #else
1012
+ return cpuinfo_isa.ssse3;
1013
+ #endif
1014
+ #else
1015
+ return false;
1016
+ #endif
1017
+ }
1018
+
1019
+ static inline bool cpuinfo_has_x86_sse4_1(void) {
1020
+ #if CPUINFO_ARCH_X86_64
1021
+ #if defined(__ANDROID__)
1022
+ return true;
1023
+ #else
1024
+ return cpuinfo_isa.sse4_1;
1025
+ #endif
1026
+ #elif CPUINFO_ARCH_X86
1027
+ return cpuinfo_isa.sse4_1;
1028
+ #else
1029
+ return false;
1030
+ #endif
1031
+ }
1032
+
1033
+ static inline bool cpuinfo_has_x86_sse4_2(void) {
1034
+ #if CPUINFO_ARCH_X86_64
1035
+ #if defined(__ANDROID__)
1036
+ return true;
1037
+ #else
1038
+ return cpuinfo_isa.sse4_2;
1039
+ #endif
1040
+ #elif CPUINFO_ARCH_X86
1041
+ return cpuinfo_isa.sse4_2;
1042
+ #else
1043
+ return false;
1044
+ #endif
1045
+ }
1046
+
1047
+ static inline bool cpuinfo_has_x86_sse4a(void) {
1048
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1049
+ return cpuinfo_isa.sse4a;
1050
+ #else
1051
+ return false;
1052
+ #endif
1053
+ }
1054
+
1055
+ static inline bool cpuinfo_has_x86_misaligned_sse(void) {
1056
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1057
+ return cpuinfo_isa.misaligned_sse;
1058
+ #else
1059
+ return false;
1060
+ #endif
1061
+ }
1062
+
1063
+ static inline bool cpuinfo_has_x86_avx(void) {
1064
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1065
+ return cpuinfo_isa.avx;
1066
+ #else
1067
+ return false;
1068
+ #endif
1069
+ }
1070
+
1071
+ static inline bool cpuinfo_has_x86_fma3(void) {
1072
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1073
+ return cpuinfo_isa.fma3;
1074
+ #else
1075
+ return false;
1076
+ #endif
1077
+ }
1078
+
1079
+ static inline bool cpuinfo_has_x86_fma4(void) {
1080
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1081
+ return cpuinfo_isa.fma4;
1082
+ #else
1083
+ return false;
1084
+ #endif
1085
+ }
1086
+
1087
+ static inline bool cpuinfo_has_x86_xop(void) {
1088
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1089
+ return cpuinfo_isa.xop;
1090
+ #else
1091
+ return false;
1092
+ #endif
1093
+ }
1094
+
1095
+ static inline bool cpuinfo_has_x86_f16c(void) {
1096
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1097
+ return cpuinfo_isa.f16c;
1098
+ #else
1099
+ return false;
1100
+ #endif
1101
+ }
1102
+
1103
+ static inline bool cpuinfo_has_x86_avx2(void) {
1104
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1105
+ return cpuinfo_isa.avx2;
1106
+ #else
1107
+ return false;
1108
+ #endif
1109
+ }
1110
+
1111
+ static inline bool cpuinfo_has_x86_avx512f(void) {
1112
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1113
+ return cpuinfo_isa.avx512f;
1114
+ #else
1115
+ return false;
1116
+ #endif
1117
+ }
1118
+
1119
+ static inline bool cpuinfo_has_x86_avx512pf(void) {
1120
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1121
+ return cpuinfo_isa.avx512pf;
1122
+ #else
1123
+ return false;
1124
+ #endif
1125
+ }
1126
+
1127
+ static inline bool cpuinfo_has_x86_avx512er(void) {
1128
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1129
+ return cpuinfo_isa.avx512er;
1130
+ #else
1131
+ return false;
1132
+ #endif
1133
+ }
1134
+
1135
+ static inline bool cpuinfo_has_x86_avx512cd(void) {
1136
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1137
+ return cpuinfo_isa.avx512cd;
1138
+ #else
1139
+ return false;
1140
+ #endif
1141
+ }
1142
+
1143
+ static inline bool cpuinfo_has_x86_avx512dq(void) {
1144
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1145
+ return cpuinfo_isa.avx512dq;
1146
+ #else
1147
+ return false;
1148
+ #endif
1149
+ }
1150
+
1151
+ static inline bool cpuinfo_has_x86_avx512bw(void) {
1152
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1153
+ return cpuinfo_isa.avx512bw;
1154
+ #else
1155
+ return false;
1156
+ #endif
1157
+ }
1158
+
1159
+ static inline bool cpuinfo_has_x86_avx512vl(void) {
1160
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1161
+ return cpuinfo_isa.avx512vl;
1162
+ #else
1163
+ return false;
1164
+ #endif
1165
+ }
1166
+
1167
+ static inline bool cpuinfo_has_x86_avx512ifma(void) {
1168
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1169
+ return cpuinfo_isa.avx512ifma;
1170
+ #else
1171
+ return false;
1172
+ #endif
1173
+ }
1174
+
1175
+ static inline bool cpuinfo_has_x86_avx512vbmi(void) {
1176
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1177
+ return cpuinfo_isa.avx512vbmi;
1178
+ #else
1179
+ return false;
1180
+ #endif
1181
+ }
1182
+
1183
+ static inline bool cpuinfo_has_x86_avx512vbmi2(void) {
1184
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1185
+ return cpuinfo_isa.avx512vbmi2;
1186
+ #else
1187
+ return false;
1188
+ #endif
1189
+ }
1190
+
1191
+ static inline bool cpuinfo_has_x86_avx512bitalg(void) {
1192
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1193
+ return cpuinfo_isa.avx512bitalg;
1194
+ #else
1195
+ return false;
1196
+ #endif
1197
+ }
1198
+
1199
+ static inline bool cpuinfo_has_x86_avx512vpopcntdq(void) {
1200
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1201
+ return cpuinfo_isa.avx512vpopcntdq;
1202
+ #else
1203
+ return false;
1204
+ #endif
1205
+ }
1206
+
1207
+ static inline bool cpuinfo_has_x86_avx512vnni(void) {
1208
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1209
+ return cpuinfo_isa.avx512vnni;
1210
+ #else
1211
+ return false;
1212
+ #endif
1213
+ }
1214
+
1215
+ static inline bool cpuinfo_has_x86_avx512bf16(void) {
1216
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1217
+ return cpuinfo_isa.avx512bf16;
1218
+ #else
1219
+ return false;
1220
+ #endif
1221
+ }
1222
+
1223
+ static inline bool cpuinfo_has_x86_avx512fp16(void) {
1224
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1225
+ return cpuinfo_isa.avx512fp16;
1226
+ #else
1227
+ return false;
1228
+ #endif
1229
+ }
1230
+
1231
+ static inline bool cpuinfo_has_x86_avx512vp2intersect(void) {
1232
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1233
+ return cpuinfo_isa.avx512vp2intersect;
1234
+ #else
1235
+ return false;
1236
+ #endif
1237
+ }
1238
+
1239
+ static inline bool cpuinfo_has_x86_avx512_4vnniw(void) {
1240
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1241
+ return cpuinfo_isa.avx512_4vnniw;
1242
+ #else
1243
+ return false;
1244
+ #endif
1245
+ }
1246
+
1247
+ static inline bool cpuinfo_has_x86_avx512_4fmaps(void) {
1248
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1249
+ return cpuinfo_isa.avx512_4fmaps;
1250
+ #else
1251
+ return false;
1252
+ #endif
1253
+ }
1254
+
1255
+ static inline bool cpuinfo_has_x86_hle(void) {
1256
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1257
+ return cpuinfo_isa.hle;
1258
+ #else
1259
+ return false;
1260
+ #endif
1261
+ }
1262
+
1263
+ static inline bool cpuinfo_has_x86_rtm(void) {
1264
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1265
+ return cpuinfo_isa.rtm;
1266
+ #else
1267
+ return false;
1268
+ #endif
1269
+ }
1270
+
1271
+ static inline bool cpuinfo_has_x86_xtest(void) {
1272
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1273
+ return cpuinfo_isa.xtest;
1274
+ #else
1275
+ return false;
1276
+ #endif
1277
+ }
1278
+
1279
+ static inline bool cpuinfo_has_x86_mpx(void) {
1280
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1281
+ return cpuinfo_isa.mpx;
1282
+ #else
1283
+ return false;
1284
+ #endif
1285
+ }
1286
+
1287
+ static inline bool cpuinfo_has_x86_cmov(void) {
1288
+ #if CPUINFO_ARCH_X86_64
1289
+ return true;
1290
+ #elif CPUINFO_ARCH_X86
1291
+ return cpuinfo_isa.cmov;
1292
+ #else
1293
+ return false;
1294
+ #endif
1295
+ }
1296
+
1297
+ static inline bool cpuinfo_has_x86_cmpxchg8b(void) {
1298
+ #if CPUINFO_ARCH_X86_64
1299
+ return true;
1300
+ #elif CPUINFO_ARCH_X86
1301
+ return cpuinfo_isa.cmpxchg8b;
1302
+ #else
1303
+ return false;
1304
+ #endif
1305
+ }
1306
+
1307
+ static inline bool cpuinfo_has_x86_cmpxchg16b(void) {
1308
+ #if CPUINFO_ARCH_X86_64
1309
+ return cpuinfo_isa.cmpxchg16b;
1310
+ #else
1311
+ return false;
1312
+ #endif
1313
+ }
1314
+
1315
+ static inline bool cpuinfo_has_x86_clwb(void) {
1316
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1317
+ return cpuinfo_isa.clwb;
1318
+ #else
1319
+ return false;
1320
+ #endif
1321
+ }
1322
+
1323
+ static inline bool cpuinfo_has_x86_movbe(void) {
1324
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1325
+ return cpuinfo_isa.movbe;
1326
+ #else
1327
+ return false;
1328
+ #endif
1329
+ }
1330
+
1331
+ static inline bool cpuinfo_has_x86_lahf_sahf(void) {
1332
+ #if CPUINFO_ARCH_X86
1333
+ return true;
1334
+ #elif CPUINFO_ARCH_X86_64
1335
+ return cpuinfo_isa.lahf_sahf;
1336
+ #else
1337
+ return false;
1338
+ #endif
1339
+ }
1340
+
1341
+ static inline bool cpuinfo_has_x86_lzcnt(void) {
1342
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1343
+ return cpuinfo_isa.lzcnt;
1344
+ #else
1345
+ return false;
1346
+ #endif
1347
+ }
1348
+
1349
+ static inline bool cpuinfo_has_x86_popcnt(void) {
1350
+ #if CPUINFO_ARCH_X86_64
1351
+ #if defined(__ANDROID__)
1352
+ return true;
1353
+ #else
1354
+ return cpuinfo_isa.popcnt;
1355
+ #endif
1356
+ #elif CPUINFO_ARCH_X86
1357
+ return cpuinfo_isa.popcnt;
1358
+ #else
1359
+ return false;
1360
+ #endif
1361
+ }
1362
+
1363
+ static inline bool cpuinfo_has_x86_tbm(void) {
1364
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1365
+ return cpuinfo_isa.tbm;
1366
+ #else
1367
+ return false;
1368
+ #endif
1369
+ }
1370
+
1371
+ static inline bool cpuinfo_has_x86_bmi(void) {
1372
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1373
+ return cpuinfo_isa.bmi;
1374
+ #else
1375
+ return false;
1376
+ #endif
1377
+ }
1378
+
1379
+ static inline bool cpuinfo_has_x86_bmi2(void) {
1380
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1381
+ return cpuinfo_isa.bmi2;
1382
+ #else
1383
+ return false;
1384
+ #endif
1385
+ }
1386
+
1387
+ static inline bool cpuinfo_has_x86_adx(void) {
1388
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1389
+ return cpuinfo_isa.adx;
1390
+ #else
1391
+ return false;
1392
+ #endif
1393
+ }
1394
+
1395
+ static inline bool cpuinfo_has_x86_aes(void) {
1396
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1397
+ return cpuinfo_isa.aes;
1398
+ #else
1399
+ return false;
1400
+ #endif
1401
+ }
1402
+
1403
+ static inline bool cpuinfo_has_x86_vaes(void) {
1404
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1405
+ return cpuinfo_isa.vaes;
1406
+ #else
1407
+ return false;
1408
+ #endif
1409
+ }
1410
+
1411
+ static inline bool cpuinfo_has_x86_pclmulqdq(void) {
1412
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1413
+ return cpuinfo_isa.pclmulqdq;
1414
+ #else
1415
+ return false;
1416
+ #endif
1417
+ }
1418
+
1419
+ static inline bool cpuinfo_has_x86_vpclmulqdq(void) {
1420
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1421
+ return cpuinfo_isa.vpclmulqdq;
1422
+ #else
1423
+ return false;
1424
+ #endif
1425
+ }
1426
+
1427
+ static inline bool cpuinfo_has_x86_gfni(void) {
1428
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1429
+ return cpuinfo_isa.gfni;
1430
+ #else
1431
+ return false;
1432
+ #endif
1433
+ }
1434
+
1435
+ static inline bool cpuinfo_has_x86_rdrand(void) {
1436
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1437
+ return cpuinfo_isa.rdrand;
1438
+ #else
1439
+ return false;
1440
+ #endif
1441
+ }
1442
+
1443
+ static inline bool cpuinfo_has_x86_rdseed(void) {
1444
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1445
+ return cpuinfo_isa.rdseed;
1446
+ #else
1447
+ return false;
1448
+ #endif
1449
+ }
1450
+
1451
+ static inline bool cpuinfo_has_x86_sha(void) {
1452
+ #if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
1453
+ return cpuinfo_isa.sha;
1454
+ #else
1455
+ return false;
1456
+ #endif
1457
+ }
1458
+
1459
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1460
+ /* This structure is not part of the stable API. Use the cpuinfo_has_arm_* functions instead. */
1461
+ struct cpuinfo_arm_isa {
1462
+ #if CPUINFO_ARCH_ARM
1463
+ bool thumb;
1464
+ bool thumb2;
1465
+ bool thumbee;
1466
+ bool jazelle;
1467
+ bool armv5e;
1468
+ bool armv6;
1469
+ bool armv6k;
1470
+ bool armv7;
1471
+ bool armv7mp;
1472
+ bool armv8;
1473
+ bool idiv;
1474
+
1475
+ bool vfpv2;
1476
+ bool vfpv3;
1477
+ bool d32;
1478
+ bool fp16;
1479
+ bool fma;
1480
+
1481
+ bool wmmx;
1482
+ bool wmmx2;
1483
+ bool neon;
1484
+ #endif
1485
+ #if CPUINFO_ARCH_ARM64
1486
+ bool atomics;
1487
+ bool bf16;
1488
+ bool sve;
1489
+ bool sve2;
1490
+ bool i8mm;
1491
+ #endif
1492
+ bool rdm;
1493
+ bool fp16arith;
1494
+ bool dot;
1495
+ bool jscvt;
1496
+ bool fcma;
1497
+ bool fhm;
1498
+
1499
+ bool aes;
1500
+ bool sha1;
1501
+ bool sha2;
1502
+ bool pmull;
1503
+ bool crc32;
1504
+ };
1505
+
1506
+ extern struct cpuinfo_arm_isa cpuinfo_isa;
1507
+ #endif
1508
+
1509
+ static inline bool cpuinfo_has_arm_thumb(void) {
1510
+ #if CPUINFO_ARCH_ARM
1511
+ return cpuinfo_isa.thumb;
1512
+ #else
1513
+ return false;
1514
+ #endif
1515
+ }
1516
+
1517
+ static inline bool cpuinfo_has_arm_thumb2(void) {
1518
+ #if CPUINFO_ARCH_ARM
1519
+ return cpuinfo_isa.thumb2;
1520
+ #else
1521
+ return false;
1522
+ #endif
1523
+ }
1524
+
1525
+ static inline bool cpuinfo_has_arm_v5e(void) {
1526
+ #if CPUINFO_ARCH_ARM
1527
+ return cpuinfo_isa.armv5e;
1528
+ #else
1529
+ return false;
1530
+ #endif
1531
+ }
1532
+
1533
+ static inline bool cpuinfo_has_arm_v6(void) {
1534
+ #if CPUINFO_ARCH_ARM
1535
+ return cpuinfo_isa.armv6;
1536
+ #else
1537
+ return false;
1538
+ #endif
1539
+ }
1540
+
1541
+ static inline bool cpuinfo_has_arm_v6k(void) {
1542
+ #if CPUINFO_ARCH_ARM
1543
+ return cpuinfo_isa.armv6k;
1544
+ #else
1545
+ return false;
1546
+ #endif
1547
+ }
1548
+
1549
+ static inline bool cpuinfo_has_arm_v7(void) {
1550
+ #if CPUINFO_ARCH_ARM
1551
+ return cpuinfo_isa.armv7;
1552
+ #else
1553
+ return false;
1554
+ #endif
1555
+ }
1556
+
1557
+ static inline bool cpuinfo_has_arm_v7mp(void) {
1558
+ #if CPUINFO_ARCH_ARM
1559
+ return cpuinfo_isa.armv7mp;
1560
+ #else
1561
+ return false;
1562
+ #endif
1563
+ }
1564
+
1565
+ static inline bool cpuinfo_has_arm_v8(void) {
1566
+ #if CPUINFO_ARCH_ARM64
1567
+ return true;
1568
+ #elif CPUINFO_ARCH_ARM
1569
+ return cpuinfo_isa.armv8;
1570
+ #else
1571
+ return false;
1572
+ #endif
1573
+ }
1574
+
1575
+ static inline bool cpuinfo_has_arm_idiv(void) {
1576
+ #if CPUINFO_ARCH_ARM64
1577
+ return true;
1578
+ #elif CPUINFO_ARCH_ARM
1579
+ return cpuinfo_isa.idiv;
1580
+ #else
1581
+ return false;
1582
+ #endif
1583
+ }
1584
+
1585
+ static inline bool cpuinfo_has_arm_vfpv2(void) {
1586
+ #if CPUINFO_ARCH_ARM
1587
+ return cpuinfo_isa.vfpv2;
1588
+ #else
1589
+ return false;
1590
+ #endif
1591
+ }
1592
+
1593
+ static inline bool cpuinfo_has_arm_vfpv3(void) {
1594
+ #if CPUINFO_ARCH_ARM64
1595
+ return true;
1596
+ #elif CPUINFO_ARCH_ARM
1597
+ return cpuinfo_isa.vfpv3;
1598
+ #else
1599
+ return false;
1600
+ #endif
1601
+ }
1602
+
1603
+ static inline bool cpuinfo_has_arm_vfpv3_d32(void) {
1604
+ #if CPUINFO_ARCH_ARM64
1605
+ return true;
1606
+ #elif CPUINFO_ARCH_ARM
1607
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.d32;
1608
+ #else
1609
+ return false;
1610
+ #endif
1611
+ }
1612
+
1613
+ static inline bool cpuinfo_has_arm_vfpv3_fp16(void) {
1614
+ #if CPUINFO_ARCH_ARM64
1615
+ return true;
1616
+ #elif CPUINFO_ARCH_ARM
1617
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16;
1618
+ #else
1619
+ return false;
1620
+ #endif
1621
+ }
1622
+
1623
+ static inline bool cpuinfo_has_arm_vfpv3_fp16_d32(void) {
1624
+ #if CPUINFO_ARCH_ARM64
1625
+ return true;
1626
+ #elif CPUINFO_ARCH_ARM
1627
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fp16 && cpuinfo_isa.d32;
1628
+ #else
1629
+ return false;
1630
+ #endif
1631
+ }
1632
+
1633
+ static inline bool cpuinfo_has_arm_vfpv4(void) {
1634
+ #if CPUINFO_ARCH_ARM64
1635
+ return true;
1636
+ #elif CPUINFO_ARCH_ARM
1637
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma;
1638
+ #else
1639
+ return false;
1640
+ #endif
1641
+ }
1642
+
1643
+ static inline bool cpuinfo_has_arm_vfpv4_d32(void) {
1644
+ #if CPUINFO_ARCH_ARM64
1645
+ return true;
1646
+ #elif CPUINFO_ARCH_ARM
1647
+ return cpuinfo_isa.vfpv3 && cpuinfo_isa.fma && cpuinfo_isa.d32;
1648
+ #else
1649
+ return false;
1650
+ #endif
1651
+ }
1652
+
1653
+ static inline bool cpuinfo_has_arm_fp16_arith(void) {
1654
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1655
+ return cpuinfo_isa.fp16arith;
1656
+ #else
1657
+ return false;
1658
+ #endif
1659
+ }
1660
+
1661
+ static inline bool cpuinfo_has_arm_bf16(void) {
1662
+ #if CPUINFO_ARCH_ARM64
1663
+ return cpuinfo_isa.bf16;
1664
+ #else
1665
+ return false;
1666
+ #endif
1667
+ }
1668
+
1669
+ static inline bool cpuinfo_has_arm_wmmx(void) {
1670
+ #if CPUINFO_ARCH_ARM
1671
+ return cpuinfo_isa.wmmx;
1672
+ #else
1673
+ return false;
1674
+ #endif
1675
+ }
1676
+
1677
+ static inline bool cpuinfo_has_arm_wmmx2(void) {
1678
+ #if CPUINFO_ARCH_ARM
1679
+ return cpuinfo_isa.wmmx2;
1680
+ #else
1681
+ return false;
1682
+ #endif
1683
+ }
1684
+
1685
+ static inline bool cpuinfo_has_arm_neon(void) {
1686
+ #if CPUINFO_ARCH_ARM64
1687
+ return true;
1688
+ #elif CPUINFO_ARCH_ARM
1689
+ return cpuinfo_isa.neon;
1690
+ #else
1691
+ return false;
1692
+ #endif
1693
+ }
1694
+
1695
+ static inline bool cpuinfo_has_arm_neon_fp16(void) {
1696
+ #if CPUINFO_ARCH_ARM64
1697
+ return true;
1698
+ #elif CPUINFO_ARCH_ARM
1699
+ return cpuinfo_isa.neon && cpuinfo_isa.fp16;
1700
+ #else
1701
+ return false;
1702
+ #endif
1703
+ }
1704
+
1705
+ static inline bool cpuinfo_has_arm_neon_fma(void) {
1706
+ #if CPUINFO_ARCH_ARM64
1707
+ return true;
1708
+ #elif CPUINFO_ARCH_ARM
1709
+ return cpuinfo_isa.neon && cpuinfo_isa.fma;
1710
+ #else
1711
+ return false;
1712
+ #endif
1713
+ }
1714
+
1715
+ static inline bool cpuinfo_has_arm_neon_v8(void) {
1716
+ #if CPUINFO_ARCH_ARM64
1717
+ return true;
1718
+ #elif CPUINFO_ARCH_ARM
1719
+ return cpuinfo_isa.neon && cpuinfo_isa.armv8;
1720
+ #else
1721
+ return false;
1722
+ #endif
1723
+ }
1724
+
1725
+ static inline bool cpuinfo_has_arm_atomics(void) {
1726
+ #if CPUINFO_ARCH_ARM64
1727
+ return cpuinfo_isa.atomics;
1728
+ #else
1729
+ return false;
1730
+ #endif
1731
+ }
1732
+
1733
+ static inline bool cpuinfo_has_arm_neon_rdm(void) {
1734
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1735
+ return cpuinfo_isa.rdm;
1736
+ #else
1737
+ return false;
1738
+ #endif
1739
+ }
1740
+
1741
+ static inline bool cpuinfo_has_arm_neon_fp16_arith(void) {
1742
+ #if CPUINFO_ARCH_ARM
1743
+ return cpuinfo_isa.neon && cpuinfo_isa.fp16arith;
1744
+ #elif CPUINFO_ARCH_ARM64
1745
+ return cpuinfo_isa.fp16arith;
1746
+ #else
1747
+ return false;
1748
+ #endif
1749
+ }
1750
+
1751
+ static inline bool cpuinfo_has_arm_fhm(void) {
1752
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1753
+ return cpuinfo_isa.fhm;
1754
+ #else
1755
+ return false;
1756
+ #endif
1757
+ }
1758
+
1759
+ static inline bool cpuinfo_has_arm_neon_dot(void) {
1760
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1761
+ return cpuinfo_isa.dot;
1762
+ #else
1763
+ return false;
1764
+ #endif
1765
+ }
1766
+
1767
+ static inline bool cpuinfo_has_arm_neon_bf16(void) {
1768
+ #if CPUINFO_ARCH_ARM64
1769
+ return cpuinfo_isa.bf16;
1770
+ #else
1771
+ return false;
1772
+ #endif
1773
+ }
1774
+
1775
+ static inline bool cpuinfo_has_arm_jscvt(void) {
1776
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1777
+ return cpuinfo_isa.jscvt;
1778
+ #else
1779
+ return false;
1780
+ #endif
1781
+ }
1782
+
1783
+ static inline bool cpuinfo_has_arm_fcma(void) {
1784
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1785
+ return cpuinfo_isa.fcma;
1786
+ #else
1787
+ return false;
1788
+ #endif
1789
+ }
1790
+
1791
+ static inline bool cpuinfo_has_arm_i8mm(void) {
1792
+ #if CPUINFO_ARCH_ARM64
1793
+ return cpuinfo_isa.i8mm;
1794
+ #else
1795
+ return false;
1796
+ #endif
1797
+ }
1798
+
1799
+ static inline bool cpuinfo_has_arm_aes(void) {
1800
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1801
+ return cpuinfo_isa.aes;
1802
+ #else
1803
+ return false;
1804
+ #endif
1805
+ }
1806
+
1807
+ static inline bool cpuinfo_has_arm_sha1(void) {
1808
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1809
+ return cpuinfo_isa.sha1;
1810
+ #else
1811
+ return false;
1812
+ #endif
1813
+ }
1814
+
1815
+ static inline bool cpuinfo_has_arm_sha2(void) {
1816
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1817
+ return cpuinfo_isa.sha2;
1818
+ #else
1819
+ return false;
1820
+ #endif
1821
+ }
1822
+
1823
+ static inline bool cpuinfo_has_arm_pmull(void) {
1824
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1825
+ return cpuinfo_isa.pmull;
1826
+ #else
1827
+ return false;
1828
+ #endif
1829
+ }
1830
+
1831
+ static inline bool cpuinfo_has_arm_crc32(void) {
1832
+ #if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
1833
+ return cpuinfo_isa.crc32;
1834
+ #else
1835
+ return false;
1836
+ #endif
1837
+ }
1838
+
1839
+ static inline bool cpuinfo_has_arm_sve(void) {
1840
+ #if CPUINFO_ARCH_ARM64
1841
+ return cpuinfo_isa.sve;
1842
+ #else
1843
+ return false;
1844
+ #endif
1845
+ }
1846
+
1847
+ static inline bool cpuinfo_has_arm_sve_bf16(void) {
1848
+ #if CPUINFO_ARCH_ARM64
1849
+ return cpuinfo_isa.sve && cpuinfo_isa.bf16;
1850
+ #else
1851
+ return false;
1852
+ #endif
1853
+ }
1854
+
1855
+ static inline bool cpuinfo_has_arm_sve2(void) {
1856
+ #if CPUINFO_ARCH_ARM64
1857
+ return cpuinfo_isa.sve2;
1858
+ #else
1859
+ return false;
1860
+ #endif
1861
+ }
1862
+
1863
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processors(void);
1864
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_cores(void);
1865
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_clusters(void);
1866
+ const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_packages(void);
1867
+ const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarchs(void);
1868
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void);
1869
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void);
1870
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void);
1871
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void);
1872
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void);
1873
+
1874
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_processor(uint32_t index);
1875
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_core(uint32_t index);
1876
+ const struct cpuinfo_cluster* CPUINFO_ABI cpuinfo_get_cluster(uint32_t index);
1877
+ const struct cpuinfo_package* CPUINFO_ABI cpuinfo_get_package(uint32_t index);
1878
+ const struct cpuinfo_uarch_info* CPUINFO_ABI cpuinfo_get_uarch(uint32_t index);
1879
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index);
1880
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index);
1881
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index);
1882
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index);
1883
+ const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index);
1884
+
1885
+ uint32_t CPUINFO_ABI cpuinfo_get_processors_count(void);
1886
+ uint32_t CPUINFO_ABI cpuinfo_get_cores_count(void);
1887
+ uint32_t CPUINFO_ABI cpuinfo_get_clusters_count(void);
1888
+ uint32_t CPUINFO_ABI cpuinfo_get_packages_count(void);
1889
+ uint32_t CPUINFO_ABI cpuinfo_get_uarchs_count(void);
1890
+ uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void);
1891
+ uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void);
1892
+ uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void);
1893
+ uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void);
1894
+ uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void);
1895
+
1896
+ /**
1897
+ * Returns an upper bound on the cache size.
1898
+ */
1899
+ uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void);
1900
+
1901
+ /**
1902
+ * Identify the logical processor that executes the current thread.
1903
+ *
1904
+ * There is no guarantee that the thread will stay on the same logical processor for any length of time.
1905
+ * Callers should treat the result only as a hint, and be prepared to handle a NULL return value.
1906
+ */
1907
+ const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void);
1908
+
1909
+ /**
1910
+ * Identify the core that executes the current thread.
1911
+ *
1912
+ * There is no guarantee that the thread will stay on the same core for any length of time.
1913
+ * Callers should treat the result only as a hint, and be prepared to handle a NULL return value.
1914
+ */
1915
+ const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void);
1916
+
1917
+ /**
1918
+ * Identify the microarchitecture index of the core that executes the current thread.
1919
+ * If the system does not support such identification, the function returns 0.
1920
+ *
1921
+ * There is no guarantee that the thread will stay on the same type of core for any length of time.
1922
+ * Callers should treat the result as only a hint.
1923
+ */
1924
+ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void);
1925
+
1926
+ /**
1927
+ * Identify the microarchitecture index of the core that executes the current thread.
1928
+ * If the system does not support such identification, the function returns the user-specified default value.
1929
+ *
1930
+ * There is no guarantee that the thread will stay on the same type of core for any length of time.
1931
+ * Callers should treat the result as only a hint.
1932
+ */
1933
+ uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index);
1934
+
1935
+ #ifdef __cplusplus
1936
+ } /* extern "C" */
1937
+ #endif
1938
+
1939
+ #endif /* CPUINFO_H */
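A short, illustrative usage sketch (not part of the packaged header) may help tie the accessors above together: it runs one detection pass and then queries a few of the feature and topology functions declared in this file. cpuinfo_initialize(), cpuinfo_deinitialize(), and the size field of struct cpuinfo_cache are assumed to come from the earlier portion of cpuinfo.h that is not shown in this excerpt.

#include <stdio.h>
#include <cpuinfo.h>

int main(void) {
    /* One-time hardware detection; the accessors below are cheap afterwards. */
    if (!cpuinfo_initialize()) {    /* assumed to be declared earlier in cpuinfo.h */
        fprintf(stderr, "cpuinfo_initialize() failed\n");
        return 1;
    }
    printf("cores: %u\n", cpuinfo_get_cores_count());
    printf("x86 AVX2: %d, ARM NEON: %d\n",
           (int) cpuinfo_has_x86_avx2(), (int) cpuinfo_has_arm_neon());
    const struct cpuinfo_cache* l1d = cpuinfo_get_l1d_cache(0);
    if (l1d != NULL) {
        printf("L1D cache: %u bytes\n", l1d->size); /* 'size' field assumed from the struct definition above this excerpt */
    }
    cpuinfo_deinitialize();         /* assumed counterpart of cpuinfo_initialize() */
    return 0;
}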
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_H
18
+ #define DNNL_H
19
+
20
+ #include "oneapi/dnnl/dnnl.h"
21
+
22
+ #endif /* DNNL_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_config.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_CONFIG_H
18
+ #define DNNL_CONFIG_H
19
+
20
+ #include "oneapi/dnnl/dnnl_config.h"
21
+
22
+ #endif /* DNNL_CONFIG_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_debug.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_DEBUG_H
18
+ #define DNNL_DEBUG_H
19
+
20
+ #include "oneapi/dnnl/dnnl_debug.h"
21
+
22
+ #endif /* DNNL_DEBUG_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_ocl.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_OCL_H
18
+ #define DNNL_OCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_ocl.h"
21
+
22
+ #endif /* DNNL_OCL_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_SYCL_H
18
+ #define DNNL_SYCL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_sycl.h"
21
+
22
+ #endif /* DNNL_SYCL_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_sycl_types.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_SYCL_TYPES_H
18
+ #define DNNL_SYCL_TYPES_H
19
+
20
+ #include "oneapi/dnnl/dnnl_sycl_types.h"
21
+
22
+ #endif /* DNNL_SYCL_TYPES_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_threadpool.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_THREADPOOL_H
18
+ #define DNNL_THREADPOOL_H
19
+
20
+ #include "oneapi/dnnl/dnnl_threadpool.h"
21
+
22
+ #endif /* DNNL_THREADPOOL_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_types.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_TYPES_H
18
+ #define DNNL_TYPES_H
19
+
20
+ #include "oneapi/dnnl/dnnl_types.h"
21
+
22
+ #endif /* DNNL_TYPES_H */
env-llmeval/lib/python3.10/site-packages/torch/include/dnnl_version.h ADDED
@@ -0,0 +1,22 @@
1
+ /*******************************************************************************
2
+ * Copyright 2020 Intel Corporation
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ *******************************************************************************/
16
+
17
+ #ifndef DNNL_VERSION_H
18
+ #define DNNL_VERSION_H
19
+
20
+ #include "oneapi/dnnl/dnnl_version.h"
21
+
22
+ #endif /* DNNL_VERSION_H */
env-llmeval/lib/python3.10/site-packages/torch/include/fp16.h ADDED
@@ -0,0 +1,11 @@
1
+ #pragma once
2
+ #ifndef FP16_H
3
+ #define FP16_H
4
+
5
+ #include <fp16/fp16.h>
6
+
7
+ #if defined(PSIMD_H)
8
+ #include <fp16/psimd.h>
9
+ #endif
10
+
11
+ #endif /* FP16_H */
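Because this wrapper only forwards to the bundled <fp16/fp16.h>, a minimal round-trip sketch follows; fp16_ieee_from_fp32_value() and fp16_ieee_to_fp32_value() are assumed to be provided by that bundled header rather than declared in the wrapper above.

#include <stdio.h>
#include <stdint.h>
#include <fp16.h>

int main(void) {
    const float x = 0.3333333f;
    /* fp32 -> IEEE fp16 bit pattern -> fp32, showing the rounding loss. */
    const uint16_t half_bits = fp16_ieee_from_fp32_value(x);     /* assumed from <fp16/fp16.h> */
    const float roundtrip = fp16_ieee_to_fp32_value(half_bits);  /* assumed from <fp16/fp16.h> */
    printf("fp32 %.7f -> fp16 0x%04x -> fp32 %.7f\n",
           (double) x, (unsigned) half_bits, (double) roundtrip);
    return 0;
}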
env-llmeval/lib/python3.10/site-packages/torch/include/fxdiv.h ADDED
@@ -0,0 +1,425 @@
1
+ #pragma once
2
+ #ifndef FXDIV_H
3
+ #define FXDIV_H
4
+
5
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
6
+ #include <cstddef>
7
+ #include <cstdint>
8
+ #include <climits>
9
+ #elif !defined(__OPENCL_VERSION__)
10
+ #include <stddef.h>
11
+ #include <stdint.h>
12
+ #include <limits.h>
13
+ #endif
14
+
15
+ #if defined(_MSC_VER)
16
+ #include <intrin.h>
17
+ #if defined(_M_IX86) || defined(_M_X64)
18
+ #include <immintrin.h>
19
+ #endif
20
+ #endif
21
+
22
+ #ifndef FXDIV_USE_INLINE_ASSEMBLY
23
+ #define FXDIV_USE_INLINE_ASSEMBLY 0
24
+ #endif
25
+
26
+ static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) {
27
+ #if defined(_MSC_VER) && defined(_M_IX86)
28
+ return (uint64_t) __emulu((unsigned int) a, (unsigned int) b);
29
+ #else
30
+ return (uint64_t) a * (uint64_t) b;
31
+ #endif
32
+ }
33
+
34
+ static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) {
35
+ #if defined(__OPENCL_VERSION__)
36
+ return mul_hi(a, b);
37
+ #elif defined(__CUDA_ARCH__)
38
+ return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b);
39
+ #elif defined(_MSC_VER) && defined(_M_IX86)
40
+ return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32);
41
+ #elif defined(_MSC_VER) && defined(_M_ARM)
42
+ return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b);
43
+ #else
44
+ return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32);
45
+ #endif
46
+ }
47
+
48
+ static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) {
49
+ #if defined(__OPENCL_VERSION__)
50
+ return mul_hi(a, b);
51
+ #elif defined(__CUDA_ARCH__)
52
+ return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b);
53
+ #elif defined(_MSC_VER) && defined(_M_X64)
54
+ return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b);
55
+ #elif defined(__GNUC__) && defined(__SIZEOF_INT128__)
56
+ return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64);
57
+ #else
58
+ const uint32_t a_lo = (uint32_t) a;
59
+ const uint32_t a_hi = (uint32_t) (a >> 32);
60
+ const uint32_t b_lo = (uint32_t) b;
61
+ const uint32_t b_hi = (uint32_t) (b >> 32);
62
+
63
+ const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) +
64
+ (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo);
65
+ return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) +
66
+ ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32);
67
+ #endif
68
+ }
69
+
70
+ static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) {
71
+ #if SIZE_MAX == UINT32_MAX
72
+ return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b);
73
+ #elif SIZE_MAX == UINT64_MAX
74
+ return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b);
75
+ #else
76
+ #error Unsupported platform
77
+ #endif
78
+ }
79
+
80
+ struct fxdiv_divisor_uint32_t {
81
+ uint32_t value;
82
+ uint32_t m;
83
+ uint8_t s1;
84
+ uint8_t s2;
85
+ };
86
+
87
+ struct fxdiv_result_uint32_t {
88
+ uint32_t quotient;
89
+ uint32_t remainder;
90
+ };
91
+
92
+ struct fxdiv_divisor_uint64_t {
93
+ uint64_t value;
94
+ uint64_t m;
95
+ uint8_t s1;
96
+ uint8_t s2;
97
+ };
98
+
99
+ struct fxdiv_result_uint64_t {
100
+ uint64_t quotient;
101
+ uint64_t remainder;
102
+ };
103
+
104
+ struct fxdiv_divisor_size_t {
105
+ size_t value;
106
+ size_t m;
107
+ uint8_t s1;
108
+ uint8_t s2;
109
+ };
110
+
111
+ struct fxdiv_result_size_t {
112
+ size_t quotient;
113
+ size_t remainder;
114
+ };
115
+
116
+ static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) {
117
+ struct fxdiv_divisor_uint32_t result = { d };
118
+ if (d == 1) {
119
+ result.m = UINT32_C(1);
120
+ result.s1 = 0;
121
+ result.s2 = 0;
122
+ } else {
123
+ #if defined(__OPENCL_VERSION__)
124
+ const uint32_t l_minus_1 = 31 - clz(d - 1);
125
+ #elif defined(__CUDA_ARCH__)
126
+ const uint32_t l_minus_1 = 31 - __clz((int) (d - 1));
127
+ #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64))
128
+ unsigned long l_minus_1;
129
+ _BitScanReverse(&l_minus_1, (unsigned long) (d - 1));
130
+ #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY
131
+ uint32_t l_minus_1;
132
+ __asm__("BSRL %[d_minus_1], %[l_minus_1]"
133
+ : [l_minus_1] "=r" (l_minus_1)
134
+ : [d_minus_1] "r" (d - 1)
135
+ : "cc");
136
+ #elif defined(__GNUC__)
137
+ const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1);
138
+ #else
139
+ /* Based on Algorithm 2 from Hacker's delight */
140
+
141
+ uint32_t l_minus_1 = 0;
142
+ uint32_t x = d - 1;
143
+ uint32_t y = x >> 16;
144
+ if (y != 0) {
145
+ l_minus_1 += 16;
146
+ x = y;
147
+ }
148
+ y = x >> 8;
149
+ if (y != 0) {
150
+ l_minus_1 += 8;
151
+ x = y;
152
+ }
153
+ y = x >> 4;
154
+ if (y != 0) {
155
+ l_minus_1 += 4;
156
+ x = y;
157
+ }
158
+ y = x >> 2;
159
+ if (y != 0) {
160
+ l_minus_1 += 2;
161
+ x = y;
162
+ }
163
+ if ((x & 2) != 0) {
164
+ l_minus_1 += 1;
165
+ }
166
+ #endif
167
+ uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d;
168
+
169
+ /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */
170
+ #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY
171
+ uint32_t q;
172
+ __asm__("DIVL %[d]"
173
+ : "=a" (q), "+d" (u_hi)
174
+ : [d] "r" (d), "a" (0)
175
+ : "cc");
176
+ #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64))
177
+ unsigned int remainder;
178
+ const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder);
179
+ #else
180
+ const uint32_t q = ((uint64_t) u_hi << 32) / d;
181
+ #endif
182
+
183
+ result.m = q + UINT32_C(1);
184
+ result.s1 = 1;
185
+ result.s2 = (uint8_t) l_minus_1;
186
+ }
187
+ return result;
188
+ }
189
+
190
+ static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) {
191
+ struct fxdiv_divisor_uint64_t result = { d };
192
+ if (d == 1) {
193
+ result.m = UINT64_C(1);
194
+ result.s1 = 0;
195
+ result.s2 = 0;
196
+ } else {
197
+ #if defined(__OPENCL_VERSION__)
198
+ const uint32_t nlz_d = clz(d);
199
+ const uint32_t l_minus_1 = 63 - clz(d - 1);
200
+ #elif defined(__CUDA_ARCH__)
201
+ const uint32_t nlz_d = __clzll((long long) d);
202
+ const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1));
203
+ #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
204
+ unsigned long l_minus_1;
205
+ _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1));
206
+ unsigned long bsr_d;
207
+ _BitScanReverse64(&bsr_d, (unsigned __int64) d);
208
+ const uint32_t nlz_d = bsr_d ^ 0x3F;
209
+ #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM))
210
+ const uint64_t d_minus_1 = d - 1;
211
+ const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0;
212
+ unsigned long l_minus_1;
213
+ if ((uint32_t) (d_minus_1 >> 32) == 0) {
214
+ _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1);
215
+ } else {
216
+ _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32));
217
+ l_minus_1 += 32;
218
+ }
219
+ const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2;
220
+ #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
221
+ uint64_t l_minus_1;
222
+ __asm__("BSRQ %[d_minus_1], %[l_minus_1]"
223
+ : [l_minus_1] "=r" (l_minus_1)
224
+ : [d_minus_1] "r" (d - 1)
225
+ : "cc");
226
+ #elif defined(__GNUC__)
227
+ const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1);
228
+ const uint32_t nlz_d = __builtin_clzll(d);
229
+ #else
230
+ /* Based on Algorithm 2 from Hacker's delight */
231
+ const uint64_t d_minus_1 = d - 1;
232
+ const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0;
233
+ uint32_t l_minus_1 = 0;
234
+ uint32_t x = (uint32_t) d_minus_1;
235
+ uint32_t y = d_minus_1 >> 32;
236
+ if (y != 0) {
237
+ l_minus_1 += 32;
238
+ x = y;
239
+ }
240
+ y = x >> 16;
241
+ if (y != 0) {
242
+ l_minus_1 += 16;
243
+ x = y;
244
+ }
245
+ y = x >> 8;
246
+ if (y != 0) {
247
+ l_minus_1 += 8;
248
+ x = y;
249
+ }
250
+ y = x >> 4;
251
+ if (y != 0) {
252
+ l_minus_1 += 4;
253
+ x = y;
254
+ }
255
+ y = x >> 2;
256
+ if (y != 0) {
257
+ l_minus_1 += 2;
258
+ x = y;
259
+ }
260
+ if ((x & 2) != 0) {
261
+ l_minus_1 += 1;
262
+ }
263
+ const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2;
264
+ #endif
265
+ uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d;
266
+
267
+ /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */
268
+ #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
269
+ uint64_t q;
270
+ __asm__("DIVQ %[d]"
271
+ : "=a" (q), "+d" (u_hi)
272
+ : [d] "r" (d), "a" (UINT64_C(0))
273
+ : "cc");
274
+ #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__)
275
+ /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */
276
+ const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d));
277
+ #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64)
278
+ unsigned __int64 remainder;
279
+ const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder);
280
+ #else
281
+ /* Implementation based on code from Hacker's delight */
282
+
283
+ /* Normalize divisor and shift divident left */
284
+ d <<= nlz_d;
285
+ u_hi <<= nlz_d;
286
+ /* Break divisor up into two 32-bit digits */
287
+ const uint64_t d_hi = (uint32_t) (d >> 32);
288
+ const uint32_t d_lo = (uint32_t) d;
289
+
290
+ /* Compute the first quotient digit, q1 */
291
+ uint64_t q1 = u_hi / d_hi;
292
+ uint64_t r1 = u_hi - q1 * d_hi;
293
+
294
+ while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) {
295
+ q1 -= 1;
296
+ r1 += d_hi;
297
+ if ((r1 >> 32) != 0) {
298
+ break;
299
+ }
300
+ }
301
+
302
+ /* Multiply and subtract. */
303
+ u_hi = (u_hi << 32) - q1 * d;
304
+
305
+ /* Compute the second quotient digit, q0 */
306
+ uint64_t q0 = u_hi / d_hi;
307
+ uint64_t r0 = u_hi - q0 * d_hi;
308
+
309
+ while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) {
310
+ q0 -= 1;
311
+ r0 += d_hi;
312
+ if ((r0 >> 32) != 0) {
313
+ break;
314
+ }
315
+ }
316
+ const uint64_t q = (q1 << 32) | (uint32_t) q0;
317
+ #endif
318
+ result.m = q + UINT64_C(1);
319
+ result.s1 = 1;
320
+ result.s2 = (uint8_t) l_minus_1;
321
+ }
322
+ return result;
323
+ }
324
+
325
+ static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) {
326
+ #if SIZE_MAX == UINT32_MAX
327
+ const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d);
328
+ #elif SIZE_MAX == UINT64_MAX
329
+ const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d);
330
+ #else
331
+ #error Unsupported platform
332
+ #endif
333
+ struct fxdiv_divisor_size_t size_result = {
334
+ (size_t) uint_result.value,
335
+ (size_t) uint_result.m,
336
+ uint_result.s1,
337
+ uint_result.s2
338
+ };
339
+ return size_result;
340
+ }
341
+
342
+ static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
343
+ const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m);
344
+ return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
345
+ }
346
+
347
+ static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
348
+ const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m);
349
+ return (t + ((n - t) >> divisor.s1)) >> divisor.s2;
350
+ }
351
+
352
+ static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
353
+ #if SIZE_MAX == UINT32_MAX
354
+ const struct fxdiv_divisor_uint32_t uint32_divisor = {
355
+ (uint32_t) divisor.value,
356
+ (uint32_t) divisor.m,
357
+ divisor.s1,
358
+ divisor.s2
359
+ };
360
+ return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor);
361
+ #elif SIZE_MAX == UINT64_MAX
362
+ const struct fxdiv_divisor_uint64_t uint64_divisor = {
363
+ (uint64_t) divisor.value,
364
+ (uint64_t) divisor.m,
365
+ divisor.s1,
366
+ divisor.s2
367
+ };
368
+ return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor);
369
+ #else
370
+ #error Unsupported platform
371
+ #endif
372
+ }
373
+
374
+ static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
375
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
376
+ return n - quotient * divisor.value;
377
+ }
378
+
379
+ static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
380
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
381
+ return n - quotient * divisor.value;
382
+ }
383
+
384
+ static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
385
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
386
+ return n - quotient * divisor.value;
387
+ }
388
+
389
+ static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) {
390
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity);
391
+ return quotient * granularity.value;
392
+ }
393
+
394
+ static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) {
395
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity);
396
+ return quotient * granularity.value;
397
+ }
398
+
399
+ static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) {
400
+ const size_t quotient = fxdiv_quotient_size_t(n, granularity);
401
+ return quotient * granularity.value;
402
+ }
403
+
404
+ static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) {
405
+ const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor);
406
+ const uint32_t remainder = n - quotient * divisor.value;
407
+ struct fxdiv_result_uint32_t result = { quotient, remainder };
408
+ return result;
409
+ }
410
+
411
+ static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
412
+ const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
413
+ const uint64_t remainder = n - quotient * divisor.value;
414
+ struct fxdiv_result_uint64_t result = { quotient, remainder };
415
+ return result;
416
+ }
417
+
418
+ static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
419
+ const size_t quotient = fxdiv_quotient_size_t(n, divisor);
420
+ const size_t remainder = n - quotient * divisor.value;
421
+ struct fxdiv_result_size_t result = { quotient, remainder };
422
+ return result;
423
+ }
424
+
425
+ #endif /* FXDIV_H */
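The header above implements division by a runtime-constant divisor as a precomputed multiply-high plus two shifts. The intended pattern, sketched below using only functions defined in this file, is to call fxdiv_init_uint32_t() once and then reuse the resulting divisor structure for many quotient/remainder computations; the divisor value 7 is arbitrary.

#include <stdio.h>
#include <stdint.h>
#include "fxdiv.h"

int main(void) {
    /* Precompute the magic multiplier and shift amounts for dividing by 7 (done once). */
    const struct fxdiv_divisor_uint32_t seven = fxdiv_init_uint32_t(7);

    /* Reuse the precomputed divisor: no hardware division inside the loop. */
    for (uint32_t n = 0; n < 20; n++) {
        const struct fxdiv_result_uint32_t r = fxdiv_divide_uint32_t(n, seven);
        printf("%2u = %u * 7 + %u\n", n, r.quotient, r.remainder);
    }
    return 0;
}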
env-llmeval/lib/python3.10/site-packages/torch/include/libshm.h ADDED
@@ -0,0 +1,46 @@
1
+ #pragma once
2
+
3
+ #include <ATen/MapAllocator.h>
4
+
5
+ #ifdef __cplusplus
6
+
7
+ void libshm_init(const char* manager_exec_path);
8
+
9
+ // Superclass to run a constructor before at::RefcountedMapAllocator
10
+ class THManagedMapAllocatorInit {
11
+ protected:
12
+ THManagedMapAllocatorInit(const char* manager_handle, const char* filename);
13
+ std::string manager_handle_;
14
+ };
15
+
16
+ // Like an at::RefcountedMapAllocator, but it also makes use of an external
17
+ // shared memory manager process to ensure that shared memory regions actually
18
+ // get freed in the end (even if processes lose the memory).
19
+ class THManagedMapAllocator : private THManagedMapAllocatorInit,
20
+ public at::RefcountedMapAllocator {
21
+ public:
22
+ THManagedMapAllocator(
23
+ const char* manager_handle,
24
+ const char* filename,
25
+ int flags,
26
+ size_t size);
27
+
28
+ void close() override;
29
+
30
+ ~THManagedMapAllocator() override {
31
+ close();
32
+ }
33
+
34
+ static at::DataPtr makeDataPtr(
35
+ const char* manager_handle,
36
+ const char* filename,
37
+ int flags,
38
+ size_t size);
39
+ static THManagedMapAllocator* fromDataPtr(const at::DataPtr&);
40
+
41
+ const char* manager_handle() const {
42
+ return manager_handle_.c_str();
43
+ }
44
+ };
45
+
46
+ #endif
env-llmeval/lib/python3.10/site-packages/torch/include/nnpack.h ADDED
@@ -0,0 +1,659 @@
1
+ #pragma once
2
+
3
+ #include <stddef.h>
4
+ #include <stdint.h>
5
+ #include <stdbool.h>
6
+
7
+ #include <pthreadpool.h>
8
+
9
+ #ifdef __cplusplus
10
+ extern "C" {
11
+ #endif
12
+
13
+ /**
14
+ * @brief Status code for any NNPACK function call.
15
+ */
16
+ enum nnp_status {
17
+ /** The call succeeded, and all output arguments now contain valid data. */
18
+ nnp_status_success = 0,
19
+ /** NNPACK function was called with batch_size == 0. */
20
+ nnp_status_invalid_batch_size = 2,
21
+ /** NNPACK function was called with channels == 0. */
22
+ nnp_status_invalid_channels = 3,
23
+ /** NNPACK function was called with input_channels == 0. */
24
+ nnp_status_invalid_input_channels = 4,
25
+ /** NNPACK function was called with output_channels == 0. */
26
+ nnp_status_invalid_output_channels = 5,
27
+ /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */
28
+ nnp_status_invalid_input_size = 10,
29
+ /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */
30
+ nnp_status_invalid_input_stride = 11,
31
+ /** NNPACK function was called with input_padding not less than the respective kernel (or pooling) size, i.e.:
32
+ *
33
+ * - input_padding.left >= kernel_size.width (>= pooling_size.width)
34
+ * - input_padding.right >= kernel_size.width (>= pooling_size.width)
35
+ * - input_padding.top >= kernel_size.height (>= pooling_size.height)
36
+ * - input_padding.bottom >= kernel_size.height (>= pooling_size.height)
37
+ */
38
+ nnp_status_invalid_input_padding = 12,
39
+ /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */
40
+ nnp_status_invalid_kernel_size = 13,
41
+ /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */
42
+ nnp_status_invalid_pooling_size = 14,
43
+ /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */
44
+ nnp_status_invalid_pooling_stride = 15,
45
+ /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */
46
+ nnp_status_invalid_algorithm = 16,
47
+ /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */
48
+ nnp_status_invalid_transform_strategy = 17,
49
+ /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */
50
+ nnp_status_invalid_output_subsampling = 13,
51
+ /** NNPACK function was called with activation not in nnp_activation enum */
52
+ nnp_status_invalid_activation = 14,
53
+ /** NNPACK function was called with invalid activation parameters */
54
+ nnp_status_invalid_activation_parameters = 15,
55
+
56
+ /** NNPACK does not support the particular input size for the function */
57
+ nnp_status_unsupported_input_size = 20,
58
+ /** NNPACK does not support the particular input stride for the function */
59
+ nnp_status_unsupported_input_stride = 21,
60
+ /** NNPACK does not support the particular input padding for the function */
61
+ nnp_status_unsupported_input_padding = 22,
62
+ /** NNPACK does not support the particular kernel size for the function */
63
+ nnp_status_unsupported_kernel_size = 23,
64
+ /** NNPACK does not support the particular pooling size for the function */
65
+ nnp_status_unsupported_pooling_size = 24,
66
+ /** NNPACK does not support the particular pooling stride for the function */
67
+ nnp_status_unsupported_pooling_stride = 25,
68
+ /** NNPACK does not support the particular convolution algorithm for the function */
69
+ nnp_status_unsupported_algorithm = 26,
70
+ /** NNPACK does not support the particular convolution transform strategy for the algorithm */
71
+ nnp_status_unsupported_transform_strategy = 27,
72
+ /** NNPACK does not support the particular activation function for the function */
73
+ nnp_status_unsupported_activation = 28,
74
+ /** NNPACK does not support the particular activation function parameters for the function */
75
+ nnp_status_unsupported_activation_parameters = 29,
76
+
77
+ /** NNPACK function was called before the library was initialized */
78
+ nnp_status_uninitialized = 50,
79
+ /** NNPACK does not implement this function for the host CPU */
80
+ nnp_status_unsupported_hardware = 51,
81
+ /** NNPACK failed to allocate memory for temporary buffers */
82
+ nnp_status_out_of_memory = 52,
83
+ /** Scratch space buffer is too small */
84
+ nnp_status_insufficient_buffer = 53,
85
+ /** Scratch space buffer is not properly aligned */
86
+ nnp_status_misaligned_buffer = 54
87
+ };
88
+
89
+ /**
90
+ * @brief Activation applied after a convolutional or fully-connected layer.
91
+ */
92
+ enum nnp_activation {
93
+ /** Identity activation f(x) := x, i.e. no transformation */
94
+ nnp_activation_identity = 0,
95
+ /** ReLU activation f(x) := max(0, x) */
96
+ nnp_activation_relu = 1,
97
+ };
98
+
99
+ /**
100
+ * @brief Algorithm for computing convolutional layers.
101
+ */
102
+ enum nnp_convolution_algorithm {
103
+ /** Let NNPACK choose the algorithm depending on layer parameters */
104
+ nnp_convolution_algorithm_auto = 0,
105
+ /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */
106
+ nnp_convolution_algorithm_ft8x8 = 1,
107
+ /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */
108
+ nnp_convolution_algorithm_ft16x16 = 2,
109
+ /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */
110
+ nnp_convolution_algorithm_wt8x8 = 3,
111
+ /** Direct convolution via implicit GEMM. */
112
+ nnp_convolution_algorithm_implicit_gemm = 4,
113
+ /** Direct convolution implementation. */
114
+ nnp_convolution_algorithm_direct = 5,
115
+ /**
116
+ * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16.
117
+ * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP),
118
+ * on unsupported processors it falls back to nnp_convolution_algorithm_wt8x8.
119
+ */
120
+ nnp_convolution_algorithm_wt8x8_fp16 = 6,
121
+ };
122
+
123
+ enum nnp_convolution_transform_strategy {
124
+ nnp_convolution_transform_strategy_compute = 1,
125
+ nnp_convolution_transform_strategy_precompute = 2,
126
+ nnp_convolution_transform_strategy_reuse = 3
127
+ };
128
+
129
+ /* For backward compatibility */
130
+ #define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute
131
+ #define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute
132
+
133
+ /**
134
+ * @brief Size of images, kernels, and pooling filters in NNPACK.
135
+ */
136
+ struct nnp_size {
137
+ /** Width (horizontal size) of an image, kernel, or pooling filter. */
138
+ size_t width;
139
+ /** Height (vertical size) of an image, kernel, or pooling filter. */
140
+ size_t height;
141
+ };
142
+
143
+ /**
144
+ * @brief Padding of images in NNPACK.
145
+ */
146
+ struct nnp_padding {
147
+ /** Padding above the image data */
148
+ size_t top;
149
+ /** Padding on the right of image data */
150
+ size_t right;
151
+ /** Padding below the image data */
152
+ size_t bottom;
153
+ /** Padding on the left of image data */
154
+ size_t left;
155
+ };
156
+
157
+ /**
158
+ * @brief Profiling information about time spent in different phases of a function call.
159
+ */
160
+ struct nnp_profile {
161
+ /** Time spent inside the function call, in seconds. */
162
+ double total;
163
+ /** Time spent on transformation of the input or input gradient tensor, in seconds. */
164
+ double input_transform;
165
+ /** Time spent on transformation of the kernel or kernel gradient tensor, in seconds. */
166
+ double kernel_transform;
167
+ /** Time spent on transformation of the output or output gradient tensor, in seconds. */
168
+ double output_transform;
169
+ /** Time spent on multiplication-accumulation of transformed coefficients, in seconds. */
170
+ double block_multiplication;
171
+ };
172
+
173
+ enum nnp_status nnp_initialize(void);
174
+
175
+ enum nnp_status nnp_deinitialize(void);
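Editor's note: a minimal usage sketch (not part of the header) of the expected initialization check before any other NNPACK call. It assumes this header is available as <nnpack.h> and that nnp_status_success is the zero status value defined earlier in the status enum.

#include <stdio.h>
#include <nnpack.h>  /* assumed include path for this header */

static int ensure_nnpack_ready(void) {
    /* nnp_initialize must succeed before any other NNPACK function is called. */
    enum nnp_status status = nnp_initialize();
    if (status != nnp_status_success) {
        /* e.g. nnp_status_unsupported_hardware on CPUs without the required SIMD extensions */
        fprintf(stderr, "NNPACK initialization failed with status %d\n", (int) status);
        return 0;
    }
    return 1;
}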
176
+
177
+ /**
178
+ * @brief Computes output of a 2D convolutional layer from input and kernel tensors.
179
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
180
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
181
+ * For minibatch size 1, use nnp_convolution_inference for optimal performance.
182
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
183
+ *
184
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
185
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
186
+ * Supports kernels up to 8x8.
187
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
188
+ * Supports kernels up to 16x16.
189
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
190
+ * Supports only 3x3 kernels.
191
+ *
192
+ * @param batch_size The number of images on the input and output of the convolutional layer.
193
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images.
194
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images.
195
+ * @param input_size Size of input images, excluding implicit zero-padding.
196
+ * @param input_padding Implicit zero-padding of input images.
197
+ * @param kernel_size Kernel size.
198
+ * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width].
199
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
200
+ * @param[in] bias A 1D array bias[output_channels].
201
+ * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where
202
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
203
+ * (kernel_size.height - 1)
204
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
205
+ * (kernel_size.width - 1)
206
+ * @param threadpool A thread pool for parallelization of the computation.
207
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
208
+ * @param[out] profile An optional pointer to profiling structure.
209
+ * If provided, the structure would record time spent in different phases of the computation.
210
+ */
211
+
212
+ enum nnp_status nnp_convolution_output(
213
+ enum nnp_convolution_algorithm algorithm,
214
+ size_t batch_size,
215
+ size_t input_channels,
216
+ size_t output_channels,
217
+ struct nnp_size input_size,
218
+ struct nnp_padding input_padding,
219
+ struct nnp_size kernel_size,
220
+ const float* input,
221
+ const float* kernel,
222
+ const float* bias,
223
+ float* output,
224
+ void* workspace_buffer,
225
+ size_t* workspace_size,
226
+ enum nnp_activation activation,
227
+ const void* activation_parameters,
228
+ pthreadpool_t threadpool,
229
+ struct nnp_profile* profile);
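Editor's sketch, not from the header: one way to call nnp_convolution_output with the signature above. The tensor pointers are hypothetical caller-allocated buffers sized per the output-size formula in the comment; passing NULL for both workspace arguments lets NNPACK manage scratch memory internally.

/* Hypothetical forward pass: 3x3 kernels, 1-pixel zero padding, identity activation.
   Per the formula above, output height/width = (1 + H + 1) - (3 - 1) = H, so the output
   buffer has the same spatial size as the input here. */
static enum nnp_status conv_forward_sketch(
    size_t batch_size, size_t input_channels, size_t output_channels,
    const float* input, const float* kernel, const float* bias, float* output)
{
    const struct nnp_size input_size = { .width = 224, .height = 224 };
    const struct nnp_padding input_padding = { .top = 1, .right = 1, .bottom = 1, .left = 1 };
    const struct nnp_size kernel_size = { .width = 3, .height = 3 };
    return nnp_convolution_output(
        nnp_convolution_algorithm_auto,
        batch_size, input_channels, output_channels,
        input_size, input_padding, kernel_size,
        input, kernel, bias, output,
        NULL, NULL,                       /* no caller-managed workspace */
        nnp_activation_identity, NULL,    /* no fused activation */
        NULL,                             /* run on the calling thread */
        NULL);                            /* no profiling */
}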
230
+
231
+ /**
232
+ * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors.
233
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
234
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
235
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
236
+ *
237
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
238
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
239
+ * Supports kernels up to 8x8.
240
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
241
+ * Supports kernels up to 16x16.
242
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
243
+ * Supports only 3x3 kernels.
244
+ *
245
+ * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer.
246
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients).
247
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients).
248
+ * @param input_size Size of input images and their gradients, excluding implicit zero-padding.
249
+ * @param input_padding Implicit zero-padding of input images.
250
+ * @param kernel_size Kernel size.
251
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width]
252
+ * where
253
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
254
+ * (kernel_size.height - 1)
255
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
256
+ * (kernel_size.width - 1)
257
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
258
+ * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width].
259
+ * @param threadpool A thread pool for parallelization of the computation.
260
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
261
+ * @param[out] profile An optional pointer to profiling structure.
262
+ * If provided, the structure would record time spent in different phases of the computation.
263
+ */
264
+ enum nnp_status nnp_convolution_input_gradient(
265
+ enum nnp_convolution_algorithm algorithm,
266
+ size_t batch_size,
267
+ size_t input_channels,
268
+ size_t output_channels,
269
+ struct nnp_size input_size,
270
+ struct nnp_padding input_padding,
271
+ struct nnp_size kernel_size,
272
+ const float* grad_output,
273
+ const float* kernel,
274
+ float* grad_input,
275
+ void* workspace_buffer,
276
+ size_t* workspace_size,
277
+ enum nnp_activation activation,
278
+ const void* activation_parameters,
279
+ pthreadpool_t threadpool,
280
+ struct nnp_profile* profile);
281
+
282
+ /**
283
+ * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors.
284
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
285
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
286
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
287
+ *
288
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
289
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
290
+ * Supports kernels up to 8x8.
291
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
292
+ * Supports kernels up to 16x16.
293
+ *
294
+ * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer.
295
+ * @param input_channels The number of channels (AKA features, dimensions) in the input images.
296
+ * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients).
297
+ * @param input_size Size of input images and their gradients, excluding implicit zero-padding.
298
+ * @param input_padding Implicit zero-padding of input images.
299
+ * @param kernel_size Kernel size.
300
+ * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width].
301
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width]
302
+ * where
303
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
304
+ * (kernel_size.height - 1)
305
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
306
+ * (kernel_size.width - 1)
307
+ * @param[out] grad_kernel A 4D tensor
308
+ * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
309
+ * @param threadpool A thread pool for parallelization of the computation.
310
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
311
+ * @param[out] profile An optional pointer to profiling structure.
312
+ * If provided, the structure would record time spent in different phases of the computation.
313
+ */
314
+ enum nnp_status nnp_convolution_kernel_gradient(
315
+ enum nnp_convolution_algorithm algorithm,
316
+ size_t batch_size,
317
+ size_t input_channels,
318
+ size_t output_channels,
319
+ struct nnp_size input_size,
320
+ struct nnp_padding input_padding,
321
+ struct nnp_size kernel_size,
322
+ const float* input,
323
+ const float* grad_output,
324
+ float* grad_kernel,
325
+ void* workspace_buffer,
326
+ size_t* workspace_size,
327
+ enum nnp_activation activation,
328
+ const void* activation_parameters,
329
+ pthreadpool_t threadpool,
330
+ struct nnp_profile* profile);
331
+
332
+ /**
333
+ * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor.
334
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
335
+ * @param algorithm The type of algorithm to use for convolution. Possible values are:
336
+ *
337
+ * - nnp_convolution_algorithm_auto -- let the function choose the algorithm.
338
+ * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks.
339
+ * Supports kernels up to 8x8.
340
+ * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks.
341
+ * Supports kernels up to 16x16.
342
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6).
343
+ * Supports only 3x3 kernels.
344
+ *
345
+ * @param transform_strategy A strategy that guides computation of kernel transform coefficients.
346
+ * Possible values are:
347
+ *
348
+ * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed
349
+ * coefficients.
350
+ * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed
351
+ * coefficients.
352
+ *
353
+ * @param input_channels The number of channels (AKA features, dimensions) in the input image.
354
+ * @param output_channels The number of channels (AKA features, dimensions) in the output image.
355
+ * @param input_size Size of input image, excluding implicit zero-padding.
356
+ * @param input_padding Implicit zero-padding of input image.
357
+ * @param kernel_size Kernel size.
358
+ * @param output_subsampling Subsample region for output, also known as convolution stride.
359
+ * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width].
360
+ * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width].
361
+ * @param[in] bias A 1D array bias[output_channels].
362
+ * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where
363
+ * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) -
364
+ * (kernel_size.height - 1)
365
+ * output_size.width = (input_padding.left + input_size.width + input_padding.right) -
366
+ * (kernel_size.width - 1)
367
+ * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes.
368
+ * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size
369
+ * of required workspace memory at the workspace_size location, and exit without
370
+ * computations.
371
+ * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory
372
+ * before and deallocate after this computation, potentially at significant runtime cost.
373
+ * @param[in,out] workspace_size Pointer to the size of workspace buffer.
374
+ * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to
375
+ * the location specified by this pointer.
376
+ * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of
377
+ * the buffer, in bytes.
378
+ * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK
379
+ * would allocate memory before and deallocate after this computation, potentially at
380
+ * significant runtime cost.
381
+ * @param threadpool A thread pool for parallelization of the computation.
382
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
383
+ * @param[out] profile An optional pointer to profiling structure.
384
+ * If provided, the structure would record time spent in different phases of the computation.
385
+ */
386
+ enum nnp_status nnp_convolution_inference(
387
+ enum nnp_convolution_algorithm algorithm,
388
+ enum nnp_convolution_transform_strategy transform_strategy,
389
+ size_t input_channels,
390
+ size_t output_channels,
391
+ struct nnp_size input_size,
392
+ struct nnp_padding input_padding,
393
+ struct nnp_size kernel_size,
394
+ struct nnp_size output_subsampling,
395
+ const float* input,
396
+ const float* kernel,
397
+ const float* bias,
398
+ float* output,
399
+ void* workspace_buffer,
400
+ size_t* workspace_size,
401
+ enum nnp_activation activation,
402
+ const void* activation_parameters,
403
+ pthreadpool_t threadpool,
404
+ struct nnp_profile* profile);
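Editor's sketch of the two-step workspace protocol described above: query the required scratch size with workspace_buffer == NULL, allocate a 64-byte-aligned buffer, then call again with the same arguments. posix_memalign is used here only as one way to satisfy the alignment requirement; error handling is minimal.

#include <stdlib.h>   /* posix_memalign, free */

static enum nnp_status conv_inference_with_workspace(
    size_t input_channels, size_t output_channels,
    struct nnp_size input_size, struct nnp_padding input_padding,
    struct nnp_size kernel_size, struct nnp_size output_subsampling,
    const float* input, const float* kernel, const float* bias, float* output)
{
    size_t workspace_size = 0;
    /* First call: workspace_buffer == NULL, workspace_size != NULL, so NNPACK only
       reports the required scratch size and returns without computing. */
    enum nnp_status status = nnp_convolution_inference(
        nnp_convolution_algorithm_auto, nnp_convolution_transform_strategy_compute,
        input_channels, output_channels,
        input_size, input_padding, kernel_size, output_subsampling,
        input, kernel, bias, output,
        NULL, &workspace_size,
        nnp_activation_identity, NULL, NULL, NULL);
    if (status != nnp_status_success)
        return status;

    void* workspace = NULL;
    if (posix_memalign(&workspace, 64, workspace_size) != 0)
        return nnp_status_out_of_memory;

    /* Second call: identical arguments, now with the aligned scratch buffer. */
    status = nnp_convolution_inference(
        nnp_convolution_algorithm_auto, nnp_convolution_transform_strategy_compute,
        input_channels, output_channels,
        input_size, input_padding, kernel_size, output_subsampling,
        input, kernel, bias, output,
        workspace, &workspace_size,
        nnp_activation_identity, NULL, NULL, NULL);
    free(workspace);
    return status;
}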
405
+
406
+ /**
407
+ * @brief Computes output of a fully connected layer from input and kernel matrices.
408
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
409
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch.
410
+ * For minibatch size 1, use nnp_fully_connected_inference for optimal performance.
411
+ * @param batch_size The number of vectors on the input and output of the fully connected layer.
412
+ * @param input_channels The number of channels (AKA features, dimensions) in the input matrix.
413
+ * @param output_channels The number of channels (AKA features, dimensions) in the output matrix.
414
+ * @param[in] input A 2D matrix input[batch_size][input_channels].
415
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels].
416
+ * @param[out] output A 2D matrix output[batch_size][output_channels].
417
+ * @param threadpool A thread pool for parallelization of the computation.
418
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
419
+ */
420
+ enum nnp_status nnp_fully_connected_output(
421
+ size_t batch_size,
422
+ size_t input_channels,
423
+ size_t output_channels,
424
+ const float input[],
425
+ const float kernel[],
426
+ float output[],
427
+ pthreadpool_t threadpool,
428
+ struct nnp_profile* profile);
429
+
430
+ /**
431
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
432
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
433
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
434
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
435
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
436
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements.
437
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
438
+ * @param threadpool A thread pool for parallelization of the computation.
439
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
440
+ */
441
+ enum nnp_status nnp_fully_connected_inference(
442
+ size_t input_channels,
443
+ size_t output_channels,
444
+ const float* input,
445
+ const float* kernel,
446
+ float* output,
447
+ pthreadpool_t threadpool);
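A short editorial example of the single-vector path above; the arrays are hypothetical and sized per the parameter documentation.

/* Hypothetical: y = W * x for one FP32 input vector, single-threaded (threadpool == NULL). */
static enum nnp_status fc_forward_one(
    size_t input_channels, size_t output_channels,
    const float* x,   /* [input_channels]                  */
    const float* W,   /* [output_channels][input_channels] */
    float* y)         /* [output_channels]                 */
{
    return nnp_fully_connected_inference(input_channels, output_channels, x, W, y, NULL);
}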
448
+
449
+ /**
450
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
451
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
452
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
453
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
454
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
455
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements.
456
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
457
+ * @param threadpool A thread pool for parallelization of the computation.
458
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
459
+ */
460
+ enum nnp_status nnp_fully_connected_inference_f16f32(
461
+ size_t input_channels,
462
+ size_t output_channels,
463
+ const float* input,
464
+ const void* kernel,
465
+ float* output,
466
+ pthreadpool_t threadpool);
467
+
468
+ /**
469
+ * @brief Computes output of a max-pooling layer for an input tensor.
470
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
471
+ * propagation. It is optimized for both large and small minibatch sizes.
472
+ * @param batch_size The number of images on the input and output of the max-pooling layer.
473
+ * @param channels The number of channels (AKA features, dimensions) in both input and output images.
474
+ * @param input_size Size of input images, excluding implicit zero-padding.
475
+ * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but
476
+ * affect the output size.
477
+ * @param pooling_size Size of the pooling filter. Only 2x2 filters are currently supported.
478
+ * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported.
479
+ * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width].
480
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where
481
+ * output_size.height = ceil(
482
+ * (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) /
483
+ * pooling_stride.height) + 1
484
+ * output_size.width = ceil(
485
+ * (input_padding.left + input_size.width + input_padding.right - pooling_size.width) /
486
+ * pooling_stride.width) + 1
487
+ * @param threadpool A thread pool for parallelization of the computation.
488
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
489
+ */
490
+ enum nnp_status nnp_max_pooling_output(
491
+ size_t batch_size,
492
+ size_t channels,
493
+ struct nnp_size input_size,
494
+ struct nnp_padding input_padding,
495
+ struct nnp_size pooling_size,
496
+ struct nnp_size pooling_stride,
497
+ const float input[],
498
+ float output[],
499
+ pthreadpool_t threadpool);
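Editor's sketch illustrating the output-size formula above for the only configuration currently supported (2x2 pooling with 2x2 stride); the buffers are hypothetical caller-allocated tensors.

/* With 32x32 inputs, no padding, 2x2 pooling and 2x2 stride:
   output_size.height = ceil((0 + 32 + 0 - 2) / 2) + 1 = 16, and likewise for width. */
static enum nnp_status maxpool_2x2_sketch(
    size_t batch_size, size_t channels, const float* input, float* output)
{
    const struct nnp_size input_size = { .width = 32, .height = 32 };
    const struct nnp_padding input_padding = { 0, 0, 0, 0 };
    const struct nnp_size pooling_size = { .width = 2, .height = 2 };
    const struct nnp_size pooling_stride = { .width = 2, .height = 2 };
    return nnp_max_pooling_output(batch_size, channels,
                                  input_size, input_padding,
                                  pooling_size, pooling_stride,
                                  input, output, NULL);
}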
500
+
501
+ /**
502
+ * @brief Computes output of a softmax layer for an input matrix.
503
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
504
+ * propagation. It is optimized for both large and small minibatch sizes.
505
+ * @param batch_size The number of vectors on the input and output of the softmax layer.
506
+ * @param channels The number of channels (AKA features, dimensions) in both input and output vectors.
507
+ * @param[in] input A 2D matrix input[batch_size][channels].
508
+ * @param[out] output A 2D matrix output[batch_size][channels].
509
+ * @param threadpool A thread pool for parallelization of the computation.
510
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
511
+ */
512
+ enum nnp_status nnp_softmax_output(
513
+ size_t batch_size,
514
+ size_t channels,
515
+ const float input[],
516
+ float output[],
517
+ pthreadpool_t threadpool);
518
+
519
+ /**
520
+ * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix.
521
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
522
+ * propagation. It is optimized for both large and small minibatch sizes.
523
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
524
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
525
+ * @param[in] input A 2D matrix input[batch_size][channels].
526
+ * @param[out] output A 2D matrix output[batch_size][channels].
527
+ * @param threadpool A thread pool for parallelization of the computation.
528
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
529
+ */
530
+ enum nnp_status nnp_relu_output(
531
+ size_t batch_size,
532
+ size_t channels,
533
+ const float input[],
534
+ float output[],
535
+ float negative_slope,
536
+ pthreadpool_t threadpool);
537
+
538
+ /**
539
+ * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices.
540
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
541
+ * It is optimized for both large and small minibatch sizes.
542
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
543
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
544
+ * @param[in] grad_output A 2D matrix grad_output[batch_size][channels].
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] grad_input A 2D matrix grad_input[batch_size][channels].
546
+ * @param threadpool A thread pool for parallelization of the computation.
547
+ * If threadpool is NULL, the computation would run on the caller thread without parallelization.
548
+ */
549
+ enum nnp_status nnp_relu_input_gradient(
550
+ size_t batch_size,
551
+ size_t channels,
552
+ const float grad_output[],
553
+ const float input[],
554
+ float grad_input[],
555
+ float negative_slope,
556
+ pthreadpool_t threadpool);
557
+
558
+ #ifdef __cplusplus
559
+ } /* extern "C" */
560
+ #endif
561
+
562
+ #ifdef __cplusplus
563
+ // Backward compatible implementations for nnp_convolution_*, if we are in C++
564
+ // mode.
565
+ inline enum nnp_status nnp_convolution_output(
566
+ enum nnp_convolution_algorithm algorithm,
567
+ size_t batch_size,
568
+ size_t input_channels,
569
+ size_t output_channels,
570
+ struct nnp_size input_size,
571
+ struct nnp_padding input_padding,
572
+ struct nnp_size kernel_size,
573
+ const float input[],
574
+ const float kernel[],
575
+ const float bias[],
576
+ float output[],
577
+ pthreadpool_t threadpool,
578
+ struct nnp_profile* profile)
579
+ {
580
+ return nnp_convolution_output(
581
+ algorithm,
582
+ batch_size, input_channels, output_channels,
583
+ input_size, input_padding, kernel_size,
584
+ input, kernel, bias, output,
585
+ NULL, NULL,
586
+ nnp_activation_identity, NULL, threadpool, profile);
587
+ }
588
+
589
+ inline enum nnp_status nnp_convolution_input_gradient(
590
+ enum nnp_convolution_algorithm algorithm,
591
+ size_t batch_size,
592
+ size_t input_channels,
593
+ size_t output_channels,
594
+ struct nnp_size input_size,
595
+ struct nnp_padding input_padding,
596
+ struct nnp_size kernel_size,
597
+ const float grad_output[],
598
+ const float kernel[],
599
+ float grad_input[],
600
+ pthreadpool_t threadpool,
601
+ struct nnp_profile* profile)
602
+ {
603
+ return nnp_convolution_input_gradient(
604
+ algorithm,
605
+ batch_size, input_channels, output_channels,
606
+ input_size, input_padding, kernel_size,
607
+ grad_output, kernel, grad_input,
608
+ NULL, NULL,
609
+ nnp_activation_identity, NULL, threadpool, profile);
610
+ }
611
+
612
+ inline enum nnp_status nnp_convolution_kernel_gradient(
613
+ enum nnp_convolution_algorithm algorithm,
614
+ size_t batch_size,
615
+ size_t input_channels,
616
+ size_t output_channels,
617
+ struct nnp_size input_size,
618
+ struct nnp_padding input_padding,
619
+ struct nnp_size kernel_size,
620
+ const float input[],
621
+ const float grad_output[],
622
+ float grad_kernel[],
623
+ pthreadpool_t threadpool,
624
+ struct nnp_profile* profile)
625
+ {
626
+ return nnp_convolution_kernel_gradient(
627
+ algorithm,
628
+ batch_size, input_channels, output_channels,
629
+ input_size, input_padding, kernel_size,
630
+ input, grad_output, grad_kernel,
631
+ NULL, NULL,
632
+ nnp_activation_identity, NULL, threadpool, profile);
633
+ }
634
+
635
+ inline enum nnp_status nnp_convolution_inference(
636
+ enum nnp_convolution_algorithm algorithm,
637
+ enum nnp_convolution_transform_strategy transform_strategy,
638
+ size_t input_channels,
639
+ size_t output_channels,
640
+ struct nnp_size input_size,
641
+ struct nnp_padding input_padding,
642
+ struct nnp_size kernel_size,
643
+ struct nnp_size output_subsampling,
644
+ const float input[],
645
+ const float kernel[],
646
+ const float bias[],
647
+ float output[],
648
+ pthreadpool_t threadpool,
649
+ struct nnp_profile* profile) {
650
+ return nnp_convolution_inference(
651
+ algorithm, transform_strategy,
652
+ input_channels, output_channels,
653
+ input_size, input_padding, kernel_size, output_subsampling,
654
+ input, kernel, bias, output, NULL, NULL,
655
+ nnp_activation_identity, NULL,
656
+ threadpool, profile);
657
+ }
658
+
659
+ #endif // __cplusplus
env-llmeval/lib/python3.10/site-packages/torch/include/psimd.h ADDED
@@ -0,0 +1,1384 @@
1
+ #pragma once
2
+ #ifndef PSIMD_H
3
+ #define PSIMD_H
4
+
5
+ #if defined(__CUDA_ARCH__)
6
+ /* CUDA compiler */
7
+ #define PSIMD_INTRINSIC __forceinline__ __device__
8
+ #elif defined(__OPENCL_VERSION__)
9
+ /* OpenCL compiler */
10
+ #define PSIMD_INTRINSIC inline static
11
+ #elif defined(__INTEL_COMPILER)
12
+ /* Intel compiler, even on Windows */
13
+ #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__))
14
+ #elif defined(__GNUC__)
15
+ /* GCC-compatible compiler (gcc/clang/icc) */
16
+ #define PSIMD_INTRINSIC inline static __attribute__((__always_inline__))
17
+ #elif defined(_MSC_VER)
18
+ /* MSVC-compatible compiler (cl/icl/clang-cl) */
19
+ #define PSIMD_INTRINSIC __forceinline static
20
+ #elif defined(__cplusplus)
21
+ /* Generic C++ compiler */
22
+ #define PSIMD_INTRINSIC inline static
23
+ #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
24
+ /* Generic C99 compiler */
25
+ #define PSIMD_INTRINSIC inline static
26
+ #else
27
+ /* Generic C compiler */
28
+ #define PSIMD_INTRINSIC static
29
+ #endif
30
+
31
+ #if defined(__GNUC__) || defined(__clang__)
32
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
33
+ #include <arm_neon.h>
34
+ #endif
35
+
36
+ #if defined(__SSE2__)
37
+ #include <emmintrin.h>
38
+ #endif
39
+
40
+ #if defined(__SSE3__)
41
+ #include <pmmintrin.h>
42
+ #endif
43
+
44
+ #if defined(__SSSE3__)
45
+ #include <tmmintrin.h>
46
+ #endif
47
+
48
+ #if defined(__SSE4_1__)
49
+ #include <smmintrin.h>
50
+ #endif
51
+
52
+ #if defined(__SSE4_2__)
53
+ #include <nmmintrin.h>
54
+ #endif
55
+
56
+ #if defined(__AVX__)
57
+ #include <immintrin.h>
58
+ #endif
59
+ #elif defined(_MSC_VER)
60
+ #include <intrin.h>
61
+ #endif
62
+
63
+ #if defined(__cplusplus)
64
+ #define PSIMD_CXX_SYNTAX
65
+ #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
66
+ #define PSIMD_C11_SYNTAX
67
+ #elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
68
+ #define PSIMD_C99_SYNTAX
69
+ #else
70
+ #define PSIMD_C89_SYNTAX
71
+ #endif
72
+
73
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
74
+ #include <cstddef>
75
+ #include <cstdint>
76
+ #elif !defined(__OPENCL_VERSION__)
77
+ #include <stddef.h>
78
+ #include <stdint.h>
79
+ #endif
80
+
81
+ #if defined(__GNUC__) || defined(__clang__)
82
+ #define PSIMD_HAVE_F64 0
83
+ #define PSIMD_HAVE_F32 1
84
+ #define PSIMD_HAVE_U8 1
85
+ #define PSIMD_HAVE_S8 1
86
+ #define PSIMD_HAVE_U16 1
87
+ #define PSIMD_HAVE_S16 1
88
+ #define PSIMD_HAVE_U32 1
89
+ #define PSIMD_HAVE_S32 1
90
+ #define PSIMD_HAVE_U64 0
91
+ #define PSIMD_HAVE_S64 0
92
+
93
+ typedef int8_t psimd_s8 __attribute__((vector_size(16), aligned(1)));
94
+ typedef uint8_t psimd_u8 __attribute__((vector_size(16), aligned(1)));
95
+ typedef int16_t psimd_s16 __attribute__((vector_size(16), aligned(2)));
96
+ typedef uint16_t psimd_u16 __attribute__((vector_size(16), aligned(2)));
97
+ typedef int32_t psimd_s32 __attribute__((vector_size(16), aligned(4)));
98
+ typedef uint32_t psimd_u32 __attribute__((vector_size(16), aligned(4)));
99
+ typedef float psimd_f32 __attribute__((vector_size(16), aligned(4)));
100
+
101
+ typedef struct {
102
+ psimd_s8 lo;
103
+ psimd_s8 hi;
104
+ } psimd_s8x2;
105
+
106
+ typedef struct {
107
+ psimd_u8 lo;
108
+ psimd_u8 hi;
109
+ } psimd_u8x2;
110
+
111
+ typedef struct {
112
+ psimd_s16 lo;
113
+ psimd_s16 hi;
114
+ } psimd_s16x2;
115
+
116
+ typedef struct {
117
+ psimd_u16 lo;
118
+ psimd_u16 hi;
119
+ } psimd_u16x2;
120
+
121
+ typedef struct {
122
+ psimd_s32 lo;
123
+ psimd_s32 hi;
124
+ } psimd_s32x2;
125
+
126
+ typedef struct {
127
+ psimd_u32 lo;
128
+ psimd_u32 hi;
129
+ } psimd_u32x2;
130
+
131
+ typedef struct {
132
+ psimd_f32 lo;
133
+ psimd_f32 hi;
134
+ } psimd_f32x2;
135
+
136
+ /* Bit casts */
137
+ PSIMD_INTRINSIC psimd_u32x2 psimd_cast_s32x2_u32x2(psimd_s32x2 v) {
138
+ return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi };
139
+ }
140
+
141
+ PSIMD_INTRINSIC psimd_f32x2 psimd_cast_s32x2_f32x2(psimd_s32x2 v) {
142
+ return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi };
143
+ }
144
+
145
+ PSIMD_INTRINSIC psimd_s32x2 psimd_cast_u32x2_s32x2(psimd_u32x2 v) {
146
+ return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi };
147
+ }
148
+
149
+ PSIMD_INTRINSIC psimd_f32x2 psimd_cast_u32x2_f32x2(psimd_u32x2 v) {
150
+ return (psimd_f32x2) { .lo = (psimd_f32) v.lo, .hi = (psimd_f32) v.hi };
151
+ }
152
+
153
+ PSIMD_INTRINSIC psimd_s32x2 psimd_cast_f32x2_s32x2(psimd_f32x2 v) {
154
+ return (psimd_s32x2) { .lo = (psimd_s32) v.lo, .hi = (psimd_s32) v.hi };
155
+ }
156
+
157
+ PSIMD_INTRINSIC psimd_u32x2 psimd_cast_f32x2_u32x2(psimd_f32x2 v) {
158
+ return (psimd_u32x2) { .lo = (psimd_u32) v.lo, .hi = (psimd_u32) v.hi };
159
+ }
160
+
161
+ /* Swap */
162
+ PSIMD_INTRINSIC void psimd_swap_s8(psimd_s8 a[1], psimd_s8 b[1]) {
163
+ const psimd_s8 new_a = *b;
164
+ const psimd_s8 new_b = *a;
165
+ *a = new_a;
166
+ *b = new_b;
167
+ }
168
+
169
+ PSIMD_INTRINSIC void psimd_swap_u8(psimd_u8 a[1], psimd_u8 b[1]) {
170
+ const psimd_u8 new_a = *b;
171
+ const psimd_u8 new_b = *a;
172
+ *a = new_a;
173
+ *b = new_b;
174
+ }
175
+
176
+ PSIMD_INTRINSIC void psimd_swap_s16(psimd_s16 a[1], psimd_s16 b[1]) {
177
+ const psimd_s16 new_a = *b;
178
+ const psimd_s16 new_b = *a;
179
+ *a = new_a;
180
+ *b = new_b;
181
+ }
182
+
183
+ PSIMD_INTRINSIC void psimd_swap_u16(psimd_u16 a[1], psimd_u16 b[1]) {
184
+ const psimd_u16 new_a = *b;
185
+ const psimd_u16 new_b = *a;
186
+ *a = new_a;
187
+ *b = new_b;
188
+ }
189
+
190
+ PSIMD_INTRINSIC void psimd_swap_s32(psimd_s32 a[1], psimd_s32 b[1]) {
191
+ const psimd_s32 new_a = *b;
192
+ const psimd_s32 new_b = *a;
193
+ *a = new_a;
194
+ *b = new_b;
195
+ }
196
+
197
+ PSIMD_INTRINSIC void psimd_swap_u32(psimd_u32 a[1], psimd_u32 b[1]) {
198
+ const psimd_u32 new_a = *b;
199
+ const psimd_u32 new_b = *a;
200
+ *a = new_a;
201
+ *b = new_b;
202
+ }
203
+
204
+ PSIMD_INTRINSIC void psimd_swap_f32(psimd_f32 a[1], psimd_f32 b[1]) {
205
+ const psimd_f32 new_a = *b;
206
+ const psimd_f32 new_b = *a;
207
+ *a = new_a;
208
+ *b = new_b;
209
+ }
210
+
211
+ /* Zero-initialization */
212
+ PSIMD_INTRINSIC psimd_s8 psimd_zero_s8(void) {
213
+ return (psimd_s8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
214
+ }
215
+
216
+ PSIMD_INTRINSIC psimd_u8 psimd_zero_u8(void) {
217
+ return (psimd_u8) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
218
+ }
219
+
220
+ PSIMD_INTRINSIC psimd_s16 psimd_zero_s16(void) {
221
+ return (psimd_s16) { 0, 0, 0, 0, 0, 0, 0, 0 };
222
+ }
223
+
224
+ PSIMD_INTRINSIC psimd_u16 psimd_zero_u16(void) {
225
+ return (psimd_u16) { 0, 0, 0, 0, 0, 0, 0, 0 };
226
+ }
227
+
228
+ PSIMD_INTRINSIC psimd_s32 psimd_zero_s32(void) {
229
+ return (psimd_s32) { 0, 0, 0, 0 };
230
+ }
231
+
232
+ PSIMD_INTRINSIC psimd_u32 psimd_zero_u32(void) {
233
+ return (psimd_u32) { 0, 0, 0, 0 };
234
+ }
235
+
236
+ PSIMD_INTRINSIC psimd_f32 psimd_zero_f32(void) {
237
+ return (psimd_f32) { 0.0f, 0.0f, 0.0f, 0.0f };
238
+ }
239
+
240
+ /* Initialization to the same constant */
241
+ PSIMD_INTRINSIC psimd_s8 psimd_splat_s8(int8_t c) {
242
+ return (psimd_s8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c };
243
+ }
244
+
245
+ PSIMD_INTRINSIC psimd_u8 psimd_splat_u8(uint8_t c) {
246
+ return (psimd_u8) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c };
247
+ }
248
+
249
+ PSIMD_INTRINSIC psimd_s16 psimd_splat_s16(int16_t c) {
250
+ return (psimd_s16) { c, c, c, c, c, c, c, c };
251
+ }
252
+
253
+ PSIMD_INTRINSIC psimd_u16 psimd_splat_u16(uint16_t c) {
254
+ return (psimd_u16) { c, c, c, c, c, c, c, c };
255
+ }
256
+
257
+ PSIMD_INTRINSIC psimd_s32 psimd_splat_s32(int32_t c) {
258
+ return (psimd_s32) { c, c, c, c };
259
+ }
260
+
261
+ PSIMD_INTRINSIC psimd_u32 psimd_splat_u32(uint32_t c) {
262
+ return (psimd_u32) { c, c, c, c };
263
+ }
264
+
265
+ PSIMD_INTRINSIC psimd_f32 psimd_splat_f32(float c) {
266
+ return (psimd_f32) { c, c, c, c };
267
+ }
268
+
269
+ /* Load vector */
270
+ PSIMD_INTRINSIC psimd_s8 psimd_load_s8(const void* address) {
271
+ return *((const psimd_s8*) address);
272
+ }
273
+
274
+ PSIMD_INTRINSIC psimd_u8 psimd_load_u8(const void* address) {
275
+ return *((const psimd_u8*) address);
276
+ }
277
+
278
+ PSIMD_INTRINSIC psimd_s16 psimd_load_s16(const void* address) {
279
+ return *((const psimd_s16*) address);
280
+ }
281
+
282
+ PSIMD_INTRINSIC psimd_u16 psimd_load_u16(const void* address) {
283
+ return *((const psimd_u16*) address);
284
+ }
285
+
286
+ PSIMD_INTRINSIC psimd_s32 psimd_load_s32(const void* address) {
287
+ return *((const psimd_s32*) address);
288
+ }
289
+
290
+ PSIMD_INTRINSIC psimd_u32 psimd_load_u32(const void* address) {
291
+ return *((const psimd_u32*) address);
292
+ }
293
+
294
+ PSIMD_INTRINSIC psimd_f32 psimd_load_f32(const void* address) {
295
+ return *((const psimd_f32*) address);
296
+ }
297
+
298
+ PSIMD_INTRINSIC psimd_s8 psimd_load_splat_s8(const void* address) {
299
+ return psimd_splat_s8(*((const int8_t*) address));
300
+ }
301
+
302
+ PSIMD_INTRINSIC psimd_u8 psimd_load_splat_u8(const void* address) {
303
+ return psimd_splat_u8(*((const uint8_t*) address));
304
+ }
305
+
306
+ PSIMD_INTRINSIC psimd_s16 psimd_load_splat_s16(const void* address) {
307
+ return psimd_splat_s16(*((const int16_t*) address));
308
+ }
309
+
310
+ PSIMD_INTRINSIC psimd_u16 psimd_load_splat_u16(const void* address) {
311
+ return psimd_splat_u16(*((const uint16_t*) address));
312
+ }
313
+
314
+ PSIMD_INTRINSIC psimd_s32 psimd_load_splat_s32(const void* address) {
315
+ return psimd_splat_s32(*((const int32_t*) address));
316
+ }
317
+
318
+ PSIMD_INTRINSIC psimd_u32 psimd_load_splat_u32(const void* address) {
319
+ return psimd_splat_u32(*((const uint32_t*) address));
320
+ }
321
+
322
+ PSIMD_INTRINSIC psimd_f32 psimd_load_splat_f32(const void* address) {
323
+ return psimd_splat_f32(*((const float*) address));
324
+ }
325
+
326
+ PSIMD_INTRINSIC psimd_s32 psimd_load1_s32(const void* address) {
327
+ return (psimd_s32) { *((const int32_t*) address), 0, 0, 0 };
328
+ }
329
+
330
+ PSIMD_INTRINSIC psimd_u32 psimd_load1_u32(const void* address) {
331
+ return (psimd_u32) { *((const uint32_t*) address), 0, 0, 0 };
332
+ }
333
+
334
+ PSIMD_INTRINSIC psimd_f32 psimd_load1_f32(const void* address) {
335
+ return (psimd_f32) { *((const float*) address), 0.0f, 0.0f, 0.0f };
336
+ }
337
+
338
+ PSIMD_INTRINSIC psimd_s32 psimd_load2_s32(const void* address) {
339
+ const int32_t* address_s32 = (const int32_t*) address;
340
+ return (psimd_s32) { address_s32[0], address_s32[1], 0, 0 };
341
+ }
342
+
343
+ PSIMD_INTRINSIC psimd_u32 psimd_load2_u32(const void* address) {
344
+ const uint32_t* address_u32 = (const uint32_t*) address;
345
+ return (psimd_u32) { address_u32[0], address_u32[1], 0, 0 };
346
+ }
347
+
348
+ PSIMD_INTRINSIC psimd_f32 psimd_load2_f32(const void* address) {
349
+ const float* address_f32 = (const float*) address;
350
+ return (psimd_f32) { address_f32[0], address_f32[1], 0.0f, 0.0f };
351
+ }
352
+
353
+ PSIMD_INTRINSIC psimd_s32 psimd_load3_s32(const void* address) {
354
+ const int32_t* address_s32 = (const int32_t*) address;
355
+ return (psimd_s32) { address_s32[0], address_s32[1], address_s32[2], 0 };
356
+ }
357
+
358
+ PSIMD_INTRINSIC psimd_u32 psimd_load3_u32(const void* address) {
359
+ const uint32_t* address_u32 = (const uint32_t*) address;
360
+ return (psimd_u32) { address_u32[0], address_u32[1], address_u32[2], 0 };
361
+ }
362
+
363
+ PSIMD_INTRINSIC psimd_f32 psimd_load3_f32(const void* address) {
364
+ const float* address_f32 = (const float*) address;
365
+ return (psimd_f32) { address_f32[0], address_f32[1], address_f32[2], 0.0f };
366
+ }
367
+
368
+ PSIMD_INTRINSIC psimd_s32 psimd_load4_s32(const void* address) {
369
+ return psimd_load_s32(address);
370
+ }
371
+
372
+ PSIMD_INTRINSIC psimd_u32 psimd_load4_u32(const void* address) {
373
+ return psimd_load_u32(address);
374
+ }
375
+
376
+ PSIMD_INTRINSIC psimd_f32 psimd_load4_f32(const void* address) {
377
+ return psimd_load_f32(address);
378
+ }
379
+
380
+ PSIMD_INTRINSIC psimd_f32 psimd_load_stride2_f32(const void* address) {
381
+ const psimd_f32 v0x1x = psimd_load_f32(address);
382
+ const psimd_f32 vx2x3 = psimd_load_f32((const float*) address + 3);
383
+ #if defined(__clang__)
384
+ return __builtin_shufflevector(v0x1x, vx2x3, 0, 2, 5, 7);
385
+ #else
386
+ return __builtin_shuffle(v0x1x, vx2x3, (psimd_s32) { 0, 2, 5, 7 });
387
+ #endif
388
+ }
389
+
390
+ PSIMD_INTRINSIC psimd_f32 psimd_load1_stride2_f32(const void* address) {
391
+ return psimd_load_f32(address);
392
+ }
393
+
394
+ PSIMD_INTRINSIC psimd_f32 psimd_load2_stride2_f32(const void* address) {
395
+ const float* address_f32 = (const float*) address;
396
+ return (psimd_f32) { address_f32[0], address_f32[2], 0.0f, 0.0f };
397
+ }
398
+
399
+ PSIMD_INTRINSIC psimd_f32 psimd_load3_stride2_f32(const void* address) {
400
+ const psimd_f32 v0x1x = psimd_load_f32(address);
401
+ const psimd_f32 v2zzz = psimd_load1_f32((const float*) address + 2);
402
+ #if defined(__clang__)
403
+ return __builtin_shufflevector(v0x1x, v2zzz, 0, 2, 4, 6);
404
+ #else
405
+ return __builtin_shuffle(v0x1x, v2zzz, (psimd_s32) { 0, 2, 4, 6 });
406
+ #endif
407
+ }
408
+
409
+ PSIMD_INTRINSIC psimd_f32 psimd_load4_stride2_f32(const void* address) {
410
+ return psimd_load_stride2_f32(address);
411
+ }
412
+
413
+ PSIMD_INTRINSIC psimd_f32 psimd_load_stride_f32(const void* address, size_t stride) {
414
+ const float* address0_f32 = (const float*) address;
415
+ const float* address1_f32 = address0_f32 + stride;
416
+ const float* address2_f32 = address1_f32 + stride;
417
+ const float* address3_f32 = address2_f32 + stride;
418
+ return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, *address3_f32 };
419
+ }
420
+
421
+ PSIMD_INTRINSIC psimd_f32 psimd_load1_stride_f32(const void* address, size_t stride) {
422
+ return psimd_load1_f32(address);
423
+ }
424
+
425
+ PSIMD_INTRINSIC psimd_f32 psimd_load2_stride_f32(const void* address, size_t stride) {
426
+ const float* address_f32 = (const float*) address;
427
+ return (psimd_f32) { address_f32[0], address_f32[stride], 0.0f, 0.0f };
428
+ }
429
+
430
+ PSIMD_INTRINSIC psimd_f32 psimd_load3_stride_f32(const void* address, size_t stride) {
431
+ const float* address0_f32 = (const float*) address;
432
+ const float* address1_f32 = address0_f32 + stride;
433
+ const float* address2_f32 = address1_f32 + stride;
434
+ return (psimd_f32) { *address0_f32, *address1_f32, *address2_f32, 0.0f };
435
+ }
436
+
437
+ PSIMD_INTRINSIC psimd_f32 psimd_load4_stride_f32(const void* address, size_t stride) {
438
+ return psimd_load_stride_f32(address, stride);
439
+ }
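Editor's note: the stride arguments above are in float elements, not bytes, since they are added to a const float* pointer. A minimal hypothetical sketch gathering part of a column from a row-major matrix:

/* Hypothetical: load rows `row`..`row+3` of column `col` from a row-major matrix
   with `cols` floats per row into one 4-lane vector. */
static psimd_f32 load_column4(const float* matrix, size_t row, size_t col, size_t cols) {
    return psimd_load_stride_f32(&matrix[row * cols + col], cols /* stride in elements */);
}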
440
+
441
+ /* Store vector */
442
+ PSIMD_INTRINSIC void psimd_store_s8(void* address, psimd_s8 value) {
443
+ *((psimd_s8*) address) = value;
444
+ }
445
+
446
+ PSIMD_INTRINSIC void psimd_store_u8(void* address, psimd_u8 value) {
447
+ *((psimd_u8*) address) = value;
448
+ }
449
+
450
+ PSIMD_INTRINSIC void psimd_store_s16(void* address, psimd_s16 value) {
451
+ *((psimd_s16*) address) = value;
452
+ }
453
+
454
+ PSIMD_INTRINSIC void psimd_store_u16(void* address, psimd_u16 value) {
455
+ *((psimd_u16*) address) = value;
456
+ }
457
+
458
+ PSIMD_INTRINSIC void psimd_store_s32(void* address, psimd_s32 value) {
459
+ *((psimd_s32*) address) = value;
460
+ }
461
+
462
+ PSIMD_INTRINSIC void psimd_store_u32(void* address, psimd_u32 value) {
463
+ *((psimd_u32*) address) = value;
464
+ }
465
+
466
+ PSIMD_INTRINSIC void psimd_store_f32(void* address, psimd_f32 value) {
467
+ *((psimd_f32*) address) = value;
468
+ }
469
+
470
+ PSIMD_INTRINSIC void psimd_store1_s32(void* address, psimd_s32 value) {
471
+ *((int32_t*) address) = value[0];
472
+ }
473
+
474
+ PSIMD_INTRINSIC void psimd_store1_u32(void* address, psimd_u32 value) {
475
+ *((uint32_t*) address) = value[0];
476
+ }
477
+
478
+ PSIMD_INTRINSIC void psimd_store1_f32(void* address, psimd_f32 value) {
479
+ *((float*) address) = value[0];
480
+ }
481
+
482
+ PSIMD_INTRINSIC void psimd_store2_s32(void* address, psimd_s32 value) {
483
+ int32_t* address_s32 = (int32_t*) address;
484
+ address_s32[0] = value[0];
485
+ address_s32[1] = value[1];
486
+ }
487
+
488
+ PSIMD_INTRINSIC void psimd_store2_u32(void* address, psimd_u32 value) {
489
+ uint32_t* address_u32 = (uint32_t*) address;
490
+ address_u32[0] = value[0];
491
+ address_u32[1] = value[1];
492
+ }
493
+
494
+ PSIMD_INTRINSIC void psimd_store2_f32(void* address, psimd_f32 value) {
495
+ float* address_f32 = (float*) address;
496
+ address_f32[0] = value[0];
497
+ address_f32[1] = value[1];
498
+ }
499
+
500
+ PSIMD_INTRINSIC void psimd_store3_s32(void* address, psimd_s32 value) {
501
+ int32_t* address_s32 = (int32_t*) address;
502
+ address_s32[0] = value[0];
503
+ address_s32[1] = value[1];
504
+ address_s32[2] = value[2];
505
+ }
506
+
507
+ PSIMD_INTRINSIC void psimd_store3_u32(void* address, psimd_u32 value) {
508
+ uint32_t* address_u32 = (uint32_t*) address;
509
+ address_u32[0] = value[0];
510
+ address_u32[1] = value[1];
511
+ address_u32[2] = value[2];
512
+ }
513
+
514
+ PSIMD_INTRINSIC void psimd_store3_f32(void* address, psimd_f32 value) {
515
+ float* address_f32 = (float*) address;
516
+ address_f32[0] = value[0];
517
+ address_f32[1] = value[1];
518
+ address_f32[2] = value[2];
519
+ }
520
+
521
+ PSIMD_INTRINSIC void psimd_store4_s32(void* address, psimd_s32 value) {
522
+ psimd_store_s32(address, value);
523
+ }
524
+
525
+ PSIMD_INTRINSIC void psimd_store4_u32(void* address, psimd_u32 value) {
526
+ psimd_store_u32(address, value);
527
+ }
528
+
529
+ PSIMD_INTRINSIC void psimd_store4_f32(void* address, psimd_f32 value) {
530
+ psimd_store_f32(address, value);
531
+ }
532
+
533
+ PSIMD_INTRINSIC void psimd_store_stride_f32(void* address, size_t stride, psimd_f32 value) {
534
+ float* address0_f32 = (float*) address;
535
+ float* address1_f32 = address0_f32 + stride;
536
+ float* address2_f32 = address1_f32 + stride;
537
+ float* address3_f32 = address2_f32 + stride;
538
+ *address0_f32 = value[0];
539
+ *address1_f32 = value[1];
540
+ *address2_f32 = value[2];
541
+ *address3_f32 = value[3];
542
+ }
543
+
544
+ PSIMD_INTRINSIC void psimd_store1_stride_f32(void* address, size_t stride, psimd_f32 value) {
545
+ psimd_store1_f32(address, value);
546
+ }
547
+
548
+ PSIMD_INTRINSIC void psimd_store2_stride_f32(void* address, size_t stride, psimd_f32 value) {
549
+ float* address_f32 = (float*) address;
550
+ address_f32[0] = value[0];
551
+ address_f32[stride] = value[1];
552
+ }
553
+
554
+ PSIMD_INTRINSIC void psimd_store3_stride_f32(void* address, size_t stride, psimd_f32 value) {
555
+ float* address0_f32 = (float*) address;
556
+ float* address1_f32 = address0_f32 + stride;
557
+ float* address2_f32 = address1_f32 + stride;
558
+ *address0_f32 = value[0];
559
+ *address1_f32 = value[1];
560
+ *address2_f32 = value[2];
561
+ }
562
+
563
+ /* Vector addition */
564
+ PSIMD_INTRINSIC psimd_s8 psimd_add_s8(psimd_s8 a, psimd_s8 b) {
565
+ return a + b;
566
+ }
567
+
568
+ PSIMD_INTRINSIC psimd_u8 psimd_add_u8(psimd_u8 a, psimd_u8 b) {
569
+ return a + b;
570
+ }
571
+
572
+ PSIMD_INTRINSIC psimd_s16 psimd_add_s16(psimd_s16 a, psimd_s16 b) {
573
+ return a + b;
574
+ }
575
+
576
+ PSIMD_INTRINSIC psimd_u16 psimd_add_u16(psimd_u16 a, psimd_u16 b) {
577
+ return a + b;
578
+ }
579
+
580
+ PSIMD_INTRINSIC psimd_s32 psimd_add_s32(psimd_s32 a, psimd_s32 b) {
581
+ return a + b;
582
+ }
583
+
584
+ PSIMD_INTRINSIC psimd_u32 psimd_add_u32(psimd_u32 a, psimd_u32 b) {
585
+ return a + b;
586
+ }
587
+
588
+ PSIMD_INTRINSIC psimd_f32 psimd_add_f32(psimd_f32 a, psimd_f32 b) {
589
+ #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
590
+ return (psimd_f32) vaddq_f32((float32x4_t) a, (float32x4_t) b);
591
+ #else
592
+ return a + b;
593
+ #endif
594
+ }
595
+
596
+ /* Vector subtraction */
597
+ PSIMD_INTRINSIC psimd_s8 psimd_sub_s8(psimd_s8 a, psimd_s8 b) {
598
+ return a - b;
599
+ }
600
+
601
+ PSIMD_INTRINSIC psimd_u8 psimd_sub_u8(psimd_u8 a, psimd_u8 b) {
602
+ return a - b;
603
+ }
604
+
605
+ PSIMD_INTRINSIC psimd_s16 psimd_sub_s16(psimd_s16 a, psimd_s16 b) {
606
+ return a - b;
607
+ }
608
+
609
+ PSIMD_INTRINSIC psimd_u16 psimd_sub_u16(psimd_u16 a, psimd_u16 b) {
610
+ return a - b;
611
+ }
612
+
613
+ PSIMD_INTRINSIC psimd_s32 psimd_sub_s32(psimd_s32 a, psimd_s32 b) {
614
+ return a - b;
615
+ }
616
+
617
+ PSIMD_INTRINSIC psimd_u32 psimd_sub_u32(psimd_u32 a, psimd_u32 b) {
618
+ return a - b;
619
+ }
620
+
621
+ PSIMD_INTRINSIC psimd_f32 psimd_sub_f32(psimd_f32 a, psimd_f32 b) {
622
+ #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
623
+ return (psimd_f32) vsubq_f32((float32x4_t) a, (float32x4_t) b);
624
+ #else
625
+ return a - b;
626
+ #endif
627
+ }
628
+
629
+ /* Vector multiplication */
630
+ PSIMD_INTRINSIC psimd_s8 psimd_mul_s8(psimd_s8 a, psimd_s8 b) {
631
+ return a * b;
632
+ }
633
+
634
+ PSIMD_INTRINSIC psimd_u8 psimd_mul_u8(psimd_u8 a, psimd_u8 b) {
635
+ return a * b;
636
+ }
637
+
638
+ PSIMD_INTRINSIC psimd_s16 psimd_mul_s16(psimd_s16 a, psimd_s16 b) {
639
+ return a * b;
640
+ }
641
+
642
+ PSIMD_INTRINSIC psimd_u16 psimd_mul_u16(psimd_u16 a, psimd_u16 b) {
643
+ return a * b;
644
+ }
645
+
646
+ PSIMD_INTRINSIC psimd_s32 psimd_mul_s32(psimd_s32 a, psimd_s32 b) {
647
+ return a * b;
648
+ }
649
+
650
+ PSIMD_INTRINSIC psimd_u32 psimd_mul_u32(psimd_u32 a, psimd_u32 b) {
651
+ return a * b;
652
+ }
653
+
654
+ PSIMD_INTRINSIC psimd_f32 psimd_mul_f32(psimd_f32 a, psimd_f32 b) {
655
+ #if defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) && !defined(__FAST_MATH__)
656
+ return (psimd_f32) vmulq_f32((float32x4_t) a, (float32x4_t) b);
657
+ #else
658
+ return a * b;
659
+ #endif
660
+ }
661
+
662
+ /* Quasi-Fused Multiply-Add */
663
+ PSIMD_INTRINSIC psimd_f32 psimd_qfma_f32(psimd_f32 a, psimd_f32 b, psimd_f32 c) {
664
+ #if defined(__aarch64__) || defined(__ARM_NEON__) && defined(__ARM_FEATURE_FMA)
665
+ return (psimd_f32) vfmaq_f32((float32x4_t) a, (float32x4_t) b, (float32x4_t) c);
666
+ #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA__)
667
+ return (psimd_f32) _mm_fmadd_ps((__m128) b, (__m128) c, (__m128) a);
668
+ #elif (defined(__x86_64__) || defined(__i386__) || defined(__i686__)) && defined(__FMA4__)
669
+ return (psimd_f32) _mm_macc_ps((__m128) b, (__m128) c, (__m128) a);
670
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__) && PSIMD_ENABLE_WASM_QFMA
671
+ return (psimd_f32) __builtin_wasm_qfma_f32x4(a, b, c);
672
+ #else
673
+ return a + b * c;
674
+ #endif
675
+ }
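An editorial sketch (not part of the header) combining the load/splat/store helpers with psimd_qfma_f32: a 4-wide axpy, assuming the length is a multiple of 4 and the pointers are valid for 16-byte vector loads.

/* Hypothetical: y[i] += a * x[i] over n elements, n assumed to be a multiple of 4. */
static void axpy_psimd(size_t n, float a, const float* x, float* y) {
    const psimd_f32 va = psimd_splat_f32(a);
    for (size_t i = 0; i < n; i += 4) {
        const psimd_f32 vx = psimd_load_f32(x + i);
        const psimd_f32 vy = psimd_load_f32(y + i);
        /* psimd_qfma_f32(acc, b, c) evaluates acc + b * c, fused where the target allows. */
        psimd_store_f32(y + i, psimd_qfma_f32(vy, va, vx));
    }
}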
676
+
677
+ PSIMD_INTRINSIC psimd_f32 psimd_div_f32(psimd_f32 a, psimd_f32 b) {
678
+ return a / b;
679
+ }
680
+
681
+ /* Vector and */
682
+ PSIMD_INTRINSIC psimd_f32 psimd_andmask_f32(psimd_s32 mask, psimd_f32 v) {
683
+ return (psimd_f32) (mask & (psimd_s32) v);
684
+ }
685
+
686
+ /* Vector and-not */
687
+ PSIMD_INTRINSIC psimd_f32 psimd_andnotmask_f32(psimd_s32 mask, psimd_f32 v) {
688
+ return (psimd_f32) (~mask & (psimd_s32) v);
689
+ }
690
+
691
+ /* Vector blend */
692
+ PSIMD_INTRINSIC psimd_s8 psimd_blend_s8(psimd_s8 mask, psimd_s8 a, psimd_s8 b) {
693
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
694
+ return (psimd_s8) vbslq_s8((uint8x16_t) mask, (int8x16_t) a, (int8x16_t) b);
695
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
696
+ return (psimd_s8) __builtin_wasm_bitselect(a, b, mask);
697
+ #else
698
+ return (mask & a) | (~mask & b);
699
+ #endif
700
+ }
701
+
702
+ PSIMD_INTRINSIC psimd_u8 psimd_blend_u8(psimd_s8 mask, psimd_u8 a, psimd_u8 b) {
703
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
704
+ return (psimd_u8) vbslq_u8((uint8x16_t) mask, (uint8x16_t) a, (uint8x16_t) b);
705
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
706
+ return (psimd_u8) __builtin_wasm_bitselect(a, b, mask);
707
+ #else
708
+ return (psimd_u8) ((mask & (psimd_s8) a) | (~mask & (psimd_s8) b));
709
+ #endif
710
+ }
711
+
712
+ PSIMD_INTRINSIC psimd_s16 psimd_blend_s16(psimd_s16 mask, psimd_s16 a, psimd_s16 b) {
713
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
714
+ return (psimd_s16) vbslq_s16((uint16x8_t) mask, (int16x8_t) a, (int16x8_t) b);
715
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
716
+ return (psimd_s16) __builtin_wasm_bitselect(a, b, mask);
717
+ #else
718
+ return (mask & a) | (~mask & b);
719
+ #endif
720
+ }
721
+
722
+ PSIMD_INTRINSIC psimd_u16 psimd_blend_u16(psimd_s16 mask, psimd_u16 a, psimd_u16 b) {
723
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
724
+ return (psimd_u16) vbslq_u16((uint16x8_t) mask, (uint16x8_t) a, (uint16x8_t) b);
725
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
726
+ return (psimd_u16) __builtin_wasm_bitselect(a, b, mask);
727
+ #else
728
+ return (psimd_u16) ((mask & (psimd_s16) a) | (~mask & (psimd_s16) b));
729
+ #endif
730
+ }
731
+
732
+ PSIMD_INTRINSIC psimd_s32 psimd_blend_s32(psimd_s32 mask, psimd_s32 a, psimd_s32 b) {
733
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
734
+ return (psimd_s32) vbslq_s32((uint32x4_t) mask, (int32x4_t) a, (int32x4_t) b);
735
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
736
+ return (psimd_s32) __builtin_wasm_bitselect(a, b, mask);
737
+ #else
738
+ return (mask & a) | (~mask & b);
739
+ #endif
740
+ }
741
+
742
+ PSIMD_INTRINSIC psimd_u32 psimd_blend_u32(psimd_s32 mask, psimd_u32 a, psimd_u32 b) {
743
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
744
+ return (psimd_u32) vbslq_u32((uint32x4_t) mask, (uint32x4_t) a, (uint32x4_t) b);
745
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
746
+ return (psimd_u32) __builtin_wasm_bitselect(a, b, mask);
747
+ #else
748
+ return (psimd_u32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b));
749
+ #endif
750
+ }
751
+
752
+ PSIMD_INTRINSIC psimd_f32 psimd_blend_f32(psimd_s32 mask, psimd_f32 a, psimd_f32 b) {
753
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
754
+ return (psimd_f32) vbslq_f32((uint32x4_t) mask, (float32x4_t) a, (float32x4_t) b);
755
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
756
+ return (psimd_f32) __builtin_wasm_bitselect(a, b, mask);
757
+ #else
758
+ return (psimd_f32) ((mask & (psimd_s32) a) | (~mask & (psimd_s32) b));
759
+ #endif
760
+ }
761
+
762
+ /* Vector blend on sign */
763
+ PSIMD_INTRINSIC psimd_s8 psimd_signblend_s8(psimd_s8 x, psimd_s8 a, psimd_s8 b) {
764
+ return psimd_blend_s8(x >> psimd_splat_s8(7), a, b);
765
+ }
766
+
767
+ PSIMD_INTRINSIC psimd_u8 psimd_signblend_u8(psimd_s8 x, psimd_u8 a, psimd_u8 b) {
768
+ return psimd_blend_u8((x >> psimd_splat_s8(7)), a, b);
769
+ }
770
+
771
+ PSIMD_INTRINSIC psimd_s16 psimd_signblend_s16(psimd_s16 x, psimd_s16 a, psimd_s16 b) {
772
+ return psimd_blend_s16(x >> psimd_splat_s16(15), a, b);
773
+ }
774
+
775
+ PSIMD_INTRINSIC psimd_u16 psimd_signblend_u16(psimd_s16 x, psimd_u16 a, psimd_u16 b) {
776
+ return psimd_blend_u16((x >> psimd_splat_s16(15)), a, b);
777
+ }
778
+
779
+ PSIMD_INTRINSIC psimd_s32 psimd_signblend_s32(psimd_s32 x, psimd_s32 a, psimd_s32 b) {
780
+ return psimd_blend_s32(x >> psimd_splat_s32(31), a, b);
781
+ }
782
+
783
+ PSIMD_INTRINSIC psimd_u32 psimd_signblend_u32(psimd_s32 x, psimd_u32 a, psimd_u32 b) {
784
+ return psimd_blend_u32((x >> psimd_splat_s32(31)), a, b);
785
+ }
786
+
787
+ PSIMD_INTRINSIC psimd_f32 psimd_signblend_f32(psimd_f32 x, psimd_f32 a, psimd_f32 b) {
788
+ const psimd_s32 mask = (psimd_s32) x >> psimd_splat_s32(31);
789
+ return psimd_blend_f32(mask, a, b);
790
+ }
791
+
792
+ /* Vector absolute value */
793
+ PSIMD_INTRINSIC psimd_f32 psimd_abs_f32(psimd_f32 v) {
794
+ const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f);
795
+ return (psimd_f32) ((psimd_s32) v & ~mask);
796
+ }
797
+
798
+ /* Vector negation */
799
+ PSIMD_INTRINSIC psimd_f32 psimd_neg_f32(psimd_f32 v) {
800
+ const psimd_s32 mask = (psimd_s32) psimd_splat_f32(-0.0f);
801
+ return (psimd_f32) ((psimd_s32) v ^ mask);
802
+ }
803
+
804
+ /* Vector maximum */
805
+ PSIMD_INTRINSIC psimd_s8 psimd_max_s8(psimd_s8 a, psimd_s8 b) {
806
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
807
+ return (psimd_s8) vmaxq_s8((int8x16_t) a, (int8x16_t) b);
808
+ #else
809
+ return psimd_blend_s8(a > b, a, b);
810
+ #endif
811
+ }
812
+
813
+ PSIMD_INTRINSIC psimd_u8 psimd_max_u8(psimd_u8 a, psimd_u8 b) {
814
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
815
+ return (psimd_u8) vmaxq_u8((uint8x16_t) a, (uint8x16_t) b);
816
+ #else
817
+ return psimd_blend_u8(a > b, a, b);
818
+ #endif
819
+ }
820
+
821
+ PSIMD_INTRINSIC psimd_s16 psimd_max_s16(psimd_s16 a, psimd_s16 b) {
822
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
823
+ return (psimd_s16) vmaxq_s16((int16x8_t) a, (int16x8_t) b);
824
+ #else
825
+ return psimd_blend_s16(a > b, a, b);
826
+ #endif
827
+ }
828
+
829
+ PSIMD_INTRINSIC psimd_u16 psimd_max_u16(psimd_u16 a, psimd_u16 b) {
830
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
831
+ return (psimd_u16) vmaxq_u16((uint16x8_t) a, (uint16x8_t) b);
832
+ #else
833
+ return psimd_blend_u16(a > b, a, b);
834
+ #endif
835
+ }
836
+
837
+ PSIMD_INTRINSIC psimd_s32 psimd_max_s32(psimd_s32 a, psimd_s32 b) {
838
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
839
+ return (psimd_s32) vmaxq_s32((int32x4_t) a, (int32x4_t) b);
840
+ #else
841
+ return psimd_blend_s32(a > b, a, b);
842
+ #endif
843
+ }
844
+
845
+ PSIMD_INTRINSIC psimd_u32 psimd_max_u32(psimd_u32 a, psimd_u32 b) {
846
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
847
+ return (psimd_u32) vmaxq_u32((uint32x4_t) a, (uint32x4_t) b);
848
+ #else
849
+ return psimd_blend_u32(a > b, a, b);
850
+ #endif
851
+ }
852
+
853
+ PSIMD_INTRINSIC psimd_f32 psimd_max_f32(psimd_f32 a, psimd_f32 b) {
854
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
855
+ return (psimd_f32) vmaxq_f32((float32x4_t) a, (float32x4_t) b);
856
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
857
+ return __builtin_wasm_max_f32x4(a, b);
858
+ #else
859
+ return psimd_blend_f32(a > b, a, b);
860
+ #endif
861
+ }
862
+
863
+ /* Vector minimum */
864
+ PSIMD_INTRINSIC psimd_s8 psimd_min_s8(psimd_s8 a, psimd_s8 b) {
865
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
866
+ return (psimd_s8) vminq_s8((int8x16_t) a, (int8x16_t) b);
867
+ #else
868
+ return psimd_blend_s8(a < b, a, b);
869
+ #endif
870
+ }
871
+
872
+ PSIMD_INTRINSIC psimd_u8 psimd_min_u8(psimd_u8 a, psimd_u8 b) {
873
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
874
+ return (psimd_u8) vminq_u8((uint8x16_t) a, (uint8x16_t) b);
875
+ #else
876
+ return psimd_blend_u8(a < b, a, b);
877
+ #endif
878
+ }
879
+
880
+ PSIMD_INTRINSIC psimd_s16 psimd_min_s16(psimd_s16 a, psimd_s16 b) {
881
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
882
+ return (psimd_s16) vminq_s16((int16x8_t) a, (int16x8_t) b);
883
+ #else
884
+ return psimd_blend_s16(a < b, a, b);
885
+ #endif
886
+ }
887
+
888
+ PSIMD_INTRINSIC psimd_u16 psimd_min_u16(psimd_u16 a, psimd_u16 b) {
889
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
890
+ return (psimd_u16) vminq_u16((uint16x8_t) a, (uint16x8_t) b);
891
+ #else
892
+ return psimd_blend_u16(a < b, a, b);
893
+ #endif
894
+ }
895
+
896
+ PSIMD_INTRINSIC psimd_s32 psimd_min_s32(psimd_s32 a, psimd_s32 b) {
897
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
898
+ return (psimd_s32) vminq_s32((int32x4_t) a, (int32x4_t) b);
899
+ #else
900
+ return psimd_blend_s32(a < b, a, b);
901
+ #endif
902
+ }
903
+
904
+ PSIMD_INTRINSIC psimd_u32 psimd_min_u32(psimd_u32 a, psimd_u32 b) {
905
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
906
+ return (psimd_u32) vminq_u32((uint32x4_t) a, (uint32x4_t) b);
907
+ #else
908
+ return psimd_blend_u32(a < b, a, b);
909
+ #endif
910
+ }
911
+
912
+ PSIMD_INTRINSIC psimd_f32 psimd_min_f32(psimd_f32 a, psimd_f32 b) {
913
+ #if defined(__ARM_NEON__) || defined(__ARM_NEON)
914
+ return (psimd_f32) vminq_f32((float32x4_t) a, (float32x4_t) b);
915
+ #elif defined(__wasm__) && defined(__wasm_simd128__) && defined(__clang__)
916
+ return __builtin_wasm_min_f32x4(a, b);
917
+ #else
918
+ return psimd_blend_f32(a < b, a, b);
919
+ #endif
920
+ }
921
+
922
+ PSIMD_INTRINSIC psimd_f32 psimd_cvt_s32_f32(psimd_s32 v) {
923
+ #if defined(__clang__)
924
+ return __builtin_convertvector(v, psimd_f32);
925
+ #elif defined(__ARM_NEON__) || defined(__ARM_NEON)
926
+ return (psimd_f32) vcvtq_f32_s32((int32x4_t) v);
927
+ #elif defined(__SSE2__)
928
+ return (psimd_f32) _mm_cvtepi32_ps((__m128i) v);
929
+ #else
930
+ return (psimd_f32) { (float) v[0], (float) v[1], (float) v[2], (float) v[3] };
931
+ #endif
932
+ }
933
+
934
+ /* Broadcast vector element */
935
+ #if defined(__clang__)
936
+ PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) {
937
+ return __builtin_shufflevector(v, v, 0, 0, 0, 0);
938
+ }
939
+
940
+ PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) {
941
+ return __builtin_shufflevector(v, v, 1, 1, 1, 1);
942
+ }
943
+
944
+ PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) {
945
+ return __builtin_shufflevector(v, v, 2, 2, 2, 2);
946
+ }
947
+
948
+ PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) {
949
+ return __builtin_shufflevector(v, v, 3, 3, 3, 3);
950
+ }
951
+ #else
952
+ PSIMD_INTRINSIC psimd_f32 psimd_splat0_f32(psimd_f32 v) {
953
+ return __builtin_shuffle(v, (psimd_s32) { 0, 0, 0, 0 });
954
+ }
955
+
956
+ PSIMD_INTRINSIC psimd_f32 psimd_splat1_f32(psimd_f32 v) {
957
+ return __builtin_shuffle(v, (psimd_s32) { 1, 1, 1, 1 });
958
+ }
959
+
960
+ PSIMD_INTRINSIC psimd_f32 psimd_splat2_f32(psimd_f32 v) {
961
+ return __builtin_shuffle(v, (psimd_s32) { 2, 2, 2, 2 });
962
+ }
963
+
964
+ PSIMD_INTRINSIC psimd_f32 psimd_splat3_f32(psimd_f32 v) {
965
+ return __builtin_shuffle(v, (psimd_s32) { 3, 3, 3, 3 });
966
+ }
967
+ #endif
968
+
969
+ /* Reversal of vector elements */
970
+ #if defined(__clang__)
971
+ PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) {
972
+ return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
973
+ }
974
+
975
+ PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) {
976
+ return __builtin_shufflevector(v, v, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
977
+ }
978
+
979
+ PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) {
980
+ return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
981
+ }
982
+
983
+ PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) {
984
+ return __builtin_shufflevector(v, v, 7, 6, 5, 4, 3, 2, 1, 0);
985
+ }
986
+
987
+ PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) {
988
+ return __builtin_shufflevector(v, v, 3, 2, 1, 0);
989
+ }
990
+
991
+ PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) {
992
+ return __builtin_shufflevector(v, v, 3, 2, 1, 0);
993
+ }
994
+
995
+ PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) {
996
+ return __builtin_shufflevector(v, v, 3, 2, 1, 0);
997
+ }
998
+ #else
999
+ PSIMD_INTRINSIC psimd_s8 psimd_reverse_s8(psimd_s8 v) {
1000
+ return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 });
1001
+ }
1002
+
1003
+ PSIMD_INTRINSIC psimd_u8 psimd_reverse_u8(psimd_u8 v) {
1004
+ return __builtin_shuffle(v, (psimd_s8) { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 });
1005
+ }
1006
+
1007
+ PSIMD_INTRINSIC psimd_s16 psimd_reverse_s16(psimd_s16 v) {
1008
+ return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 });
1009
+ }
1010
+
1011
+ PSIMD_INTRINSIC psimd_u16 psimd_reverse_u16(psimd_u16 v) {
1012
+ return __builtin_shuffle(v, (psimd_s16) { 7, 6, 5, 4, 3, 2, 1, 0 });
1013
+ }
1014
+
1015
+ PSIMD_INTRINSIC psimd_s32 psimd_reverse_s32(psimd_s32 v) {
1016
+ return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
1017
+ }
1018
+
1019
+ PSIMD_INTRINSIC psimd_u32 psimd_reverse_u32(psimd_u32 v) {
1020
+ return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
1021
+ }
1022
+
1023
+ PSIMD_INTRINSIC psimd_f32 psimd_reverse_f32(psimd_f32 v) {
1024
+ return __builtin_shuffle(v, (psimd_s32) { 3, 2, 1, 0 });
1025
+ }
1026
+ #endif
1027
+
1028
+ /* Interleaving of vector elements */
1029
+ #if defined(__clang__)
1030
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) {
1031
+ return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
1032
+ }
1033
+
1034
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) {
1035
+ return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
1036
+ }
1037
+
1038
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) {
1039
+ return __builtin_shufflevector(a, b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
1040
+ }
1041
+
1042
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) {
1043
+ return __builtin_shufflevector(a, b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
1044
+ }
1045
+
1046
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) {
1047
+ return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
1048
+ }
1049
+
1050
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) {
1051
+ return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
1052
+ }
1053
+
1054
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) {
1055
+ return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
1056
+ }
1057
+
1058
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) {
1059
+ return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
1060
+ }
1061
+
1062
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) {
1063
+ return __builtin_shufflevector(a, b, 0, 4+0, 1, 4+1);
1064
+ }
1065
+
1066
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) {
1067
+ return __builtin_shufflevector(a, b, 2, 4+2, 3, 4+3);
1068
+ }
1069
+ #else
1070
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_lo_s16(psimd_s16 a, psimd_s16 b) {
1071
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 });
1072
+ }
1073
+
1074
+ PSIMD_INTRINSIC psimd_s16 psimd_interleave_hi_s16(psimd_s16 a, psimd_s16 b) {
1075
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 });
1076
+ }
1077
+
1078
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_lo_u16(psimd_u16 a, psimd_u16 b) {
1079
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3 });
1080
+ }
1081
+
1082
+ PSIMD_INTRINSIC psimd_u16 psimd_interleave_hi_u16(psimd_u16 a, psimd_u16 b) {
1083
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7 });
1084
+ }
1085
+
1086
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_lo_s32(psimd_s32 a, psimd_s32 b) {
1087
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
1088
+ }
1089
+
1090
+ PSIMD_INTRINSIC psimd_s32 psimd_interleave_hi_s32(psimd_s32 a, psimd_s32 b) {
1091
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
1092
+ }
1093
+
1094
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_lo_u32(psimd_u32 a, psimd_u32 b) {
1095
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
1096
+ }
1097
+
1098
+ PSIMD_INTRINSIC psimd_u32 psimd_interleave_hi_u32(psimd_u32 a, psimd_u32 b) {
1099
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
1100
+ }
1101
+
1102
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_lo_f32(psimd_f32 a, psimd_f32 b) {
1103
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 4+0, 1, 4+1 });
1104
+ }
1105
+
1106
+ PSIMD_INTRINSIC psimd_f32 psimd_interleave_hi_f32(psimd_f32 a, psimd_f32 b) {
1107
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 4+2, 3, 4+3 });
1108
+ }
1109
+ #endif
1110
+
1111
+ /* Concatenation of low/high vector elements */
1112
+ #if defined(__clang__)
1113
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) {
1114
+ return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3);
1115
+ }
1116
+
1117
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) {
1118
+ return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7);
1119
+ }
1120
+
1121
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) {
1122
+ return __builtin_shufflevector(a, b, 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3);
1123
+ }
1124
+
1125
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) {
1126
+ return __builtin_shufflevector(a, b, 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7);
1127
+ }
1128
+
1129
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) {
1130
+ return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
1131
+ }
1132
+
1133
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) {
1134
+ return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
1135
+ }
1136
+
1137
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) {
1138
+ return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
1139
+ }
1140
+
1141
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) {
1142
+ return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
1143
+ }
1144
+
1145
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) {
1146
+ return __builtin_shufflevector(a, b, 0, 1, 4+0, 4+1);
1147
+ }
1148
+
1149
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) {
1150
+ return __builtin_shufflevector(a, b, 2, 3, 4+2, 4+3);
1151
+ }
1152
+ #else
1153
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_lo_s16(psimd_s16 a, psimd_s16 b) {
1154
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 });
1155
+ }
1156
+
1157
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_hi_s16(psimd_s16 a, psimd_s16 b) {
1158
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 });
1159
+ }
1160
+
1161
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_lo_u16(psimd_u16 a, psimd_u16 b) {
1162
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 1, 2, 3, 8+0, 8+1, 8+2, 8+3 });
1163
+ }
1164
+
1165
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_hi_u16(psimd_u16 a, psimd_u16 b) {
1166
+ return __builtin_shuffle(a, b, (psimd_s16) { 4, 5, 6, 7, 8+4, 8+5, 8+6, 8+7 });
1167
+ }
1168
+
1169
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_lo_s32(psimd_s32 a, psimd_s32 b) {
1170
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
1171
+ }
1172
+
1173
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_hi_s32(psimd_s32 a, psimd_s32 b) {
1174
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
1175
+ }
1176
+
1177
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_lo_u32(psimd_u32 a, psimd_u32 b) {
1178
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
1179
+ }
1180
+
1181
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_hi_u32(psimd_u32 a, psimd_u32 b) {
1182
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
1183
+ }
1184
+
1185
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_lo_f32(psimd_f32 a, psimd_f32 b) {
1186
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 1, 4+0, 4+1 });
1187
+ }
1188
+
1189
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_hi_f32(psimd_f32 a, psimd_f32 b) {
1190
+ return __builtin_shuffle(a, b, (psimd_s32) { 2, 3, 4+2, 4+3 });
1191
+ }
1192
+ #endif
1193
+
1194
+ /* Concatenation of even/odd vector elements */
1195
+ #if defined(__clang__)
1196
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) {
1197
+ return __builtin_shufflevector(a, b,
1198
+ 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14);
1199
+ }
1200
+
1201
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) {
1202
+ return __builtin_shufflevector(a, b,
1203
+ 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15);
1204
+ }
1205
+
1206
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) {
1207
+ return __builtin_shufflevector(a, b,
1208
+ 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14);
1209
+ }
1210
+
1211
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) {
1212
+ return __builtin_shufflevector(a, b,
1213
+ 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15);
1214
+ }
1215
+
1216
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) {
1217
+ return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6);
1218
+ }
1219
+
1220
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) {
1221
+ return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7);
1222
+ }
1223
+
1224
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) {
1225
+ return __builtin_shufflevector(a, b, 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6);
1226
+ }
1227
+
1228
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) {
1229
+ return __builtin_shufflevector(a, b, 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7);
1230
+ }
1231
+
1232
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) {
1233
+ return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
1234
+ }
1235
+
1236
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) {
1237
+ return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
1238
+ }
1239
+
1240
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) {
1241
+ return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
1242
+ }
1243
+
1244
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) {
1245
+ return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
1246
+ }
1247
+
1248
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) {
1249
+ return __builtin_shufflevector(a, b, 0, 2, 4+0, 4+2);
1250
+ }
1251
+
1252
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) {
1253
+ return __builtin_shufflevector(a, b, 1, 3, 4+1, 4+3);
1254
+ }
1255
+ #else
1256
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_even_s8(psimd_s8 a, psimd_s8 b) {
1257
+ return __builtin_shuffle(a, b,
1258
+ (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 });
1259
+ }
1260
+
1261
+ PSIMD_INTRINSIC psimd_s8 psimd_concat_odd_s8(psimd_s8 a, psimd_s8 b) {
1262
+ return __builtin_shuffle(a, b,
1263
+ (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 });
1264
+ }
1265
+
1266
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_even_u8(psimd_u8 a, psimd_u8 b) {
1267
+ return __builtin_shuffle(a, b,
1268
+ (psimd_s8) { 0, 2, 4, 6, 8, 10, 12, 14, 16+0, 16+2, 16+4, 16+6, 16+8, 16+10, 16+12, 16+14 });
1269
+ }
1270
+
1271
+ PSIMD_INTRINSIC psimd_u8 psimd_concat_odd_u8(psimd_u8 a, psimd_u8 b) {
1272
+ return __builtin_shuffle(a, b,
1273
+ (psimd_s8) { 1, 3, 5, 7, 9, 11, 13, 15, 16+1, 16+3, 16+5, 16+7, 16+9, 16+11, 16+13, 16+15 });
1274
+ }
1275
+
1276
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_even_s16(psimd_s16 a, psimd_s16 b) {
1277
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 });
1278
+ }
1279
+
1280
+ PSIMD_INTRINSIC psimd_s16 psimd_concat_odd_s16(psimd_s16 a, psimd_s16 b) {
1281
+ return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 });
1282
+ }
1283
+
1284
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_even_u16(psimd_u16 a, psimd_u16 b) {
1285
+ return __builtin_shuffle(a, b, (psimd_s16) { 0, 2, 4, 6, 8+0, 8+2, 8+4, 8+6 });
1286
+ }
1287
+
1288
+ PSIMD_INTRINSIC psimd_u16 psimd_concat_odd_u16(psimd_u16 a, psimd_u16 b) {
1289
+ return __builtin_shuffle(a, b, (psimd_s16) { 1, 3, 5, 7, 8+1, 8+3, 8+5, 8+7 });
1290
+ }
1291
+
1292
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_even_s32(psimd_s32 a, psimd_s32 b) {
1293
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
1294
+ }
1295
+
1296
+ PSIMD_INTRINSIC psimd_s32 psimd_concat_odd_s32(psimd_s32 a, psimd_s32 b) {
1297
+ return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
1298
+ }
1299
+
1300
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_even_u32(psimd_u32 a, psimd_u32 b) {
1301
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
1302
+ }
1303
+
1304
+ PSIMD_INTRINSIC psimd_u32 psimd_concat_odd_u32(psimd_u32 a, psimd_u32 b) {
1305
+ return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
1306
+ }
1307
+
1308
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_even_f32(psimd_f32 a, psimd_f32 b) {
1309
+ return __builtin_shuffle(a, b, (psimd_s32) { 0, 2, 4+0, 4+2 });
1310
+ }
1311
+
1312
+ PSIMD_INTRINSIC psimd_f32 psimd_concat_odd_f32(psimd_f32 a, psimd_f32 b) {
1313
+ return __builtin_shuffle(a, b, (psimd_s32) { 1, 3, 4+1, 4+3 });
1314
+ }
1315
+ #endif
1316
+
1317
+ /* Vector reduce */
1318
+ #if defined(__clang__)
1319
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) {
1320
+ const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, 0, 1);
1321
+ return temp + __builtin_shufflevector(temp, temp, 1, 0, 3, 2);
1322
+ }
1323
+
1324
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) {
1325
+ const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1));
1326
+ return psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2));
1327
+ }
1328
+
1329
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) {
1330
+ const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, 0, 1));
1331
+ return psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, 0, 3, 2));
1332
+ }
1333
+
1334
+ PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) {
1335
+ const psimd_f32 temp = v + __builtin_shufflevector(v, v, 2, 3, -1, -1);
1336
+ const psimd_f32 result = temp + __builtin_shufflevector(temp, temp, 1, -1, -1, -1);
1337
+ return result[0];
1338
+ }
1339
+
1340
+ PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) {
1341
+ const psimd_f32 temp = psimd_max_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1));
1342
+ const psimd_f32 result = psimd_max_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1));
1343
+ return result[0];
1344
+ }
1345
+
1346
+ PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) {
1347
+ const psimd_f32 temp = psimd_min_f32(v, __builtin_shufflevector(v, v, 2, 3, -1, -1));
1348
+ const psimd_f32 result = psimd_min_f32(temp, __builtin_shufflevector(temp, temp, 1, -1, -1, -1));
1349
+ return result[0];
1350
+ }
1351
+ #else
1352
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_sum_f32(psimd_f32 v) {
1353
+ const psimd_f32 temp = v + __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 });
1354
+ return temp + __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 });
1355
+ }
1356
+
1357
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_max_f32(psimd_f32 v) {
1358
+ const psimd_f32 temp = psimd_max_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }));
1359
+ return psimd_max_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }));
1360
+ }
1361
+
1362
+ PSIMD_INTRINSIC psimd_f32 psimd_allreduce_min_f32(psimd_f32 v) {
1363
+ const psimd_f32 temp = psimd_min_f32(v, __builtin_shuffle(v, (psimd_s32) { 2, 3, 0, 1 }));
1364
+ return psimd_min_f32(temp, __builtin_shuffle(temp, (psimd_s32) { 1, 0, 3, 2 }));
1365
+ }
1366
+
1367
+ PSIMD_INTRINSIC float psimd_reduce_sum_f32(psimd_f32 v) {
1368
+ const psimd_f32 result = psimd_allreduce_sum_f32(v);
1369
+ return result[0];
1370
+ }
1371
+
1372
+ PSIMD_INTRINSIC float psimd_reduce_max_f32(psimd_f32 v) {
1373
+ const psimd_f32 result = psimd_allreduce_max_f32(v);
1374
+ return result[0];
1375
+ }
1376
+
1377
+ PSIMD_INTRINSIC float psimd_reduce_min_f32(psimd_f32 v) {
1378
+ const psimd_f32 result = psimd_allreduce_min_f32(v);
1379
+ return result[0];
1380
+ }
1381
+ #endif
1382
+ #endif
1383
+
1384
+ #endif /* PSIMD_H */
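Editor's note: the psimd.h header above is a portable SIMD shim over GCC/Clang vector extensions; each psimd_* helper lowers to a native intrinsic where one exists (NEON, FMA/FMA4, WASM SIMD128) and otherwise falls back to plain lane-wise vector arithmetic. The following is a minimal usage sketch, not part of the vendored header, and it assumes psimd.h is on the include path and the compiler supports GNU vector extensions.

#include <stdio.h>
#include <psimd.h>

int main(void) {
  /* Build 4-lane float vectors with the same compound-literal style the header itself uses. */
  const psimd_f32 a = (psimd_f32) { 1.0f, 2.0f, 3.0f, 4.0f };
  const psimd_f32 b = (psimd_f32) { 0.5f, 0.5f, 0.5f, 0.5f };
  const psimd_f32 c = (psimd_f32) { 10.0f, 20.0f, 30.0f, 40.0f };

  /* Lane-wise a + b * c; maps to vfmaq_f32 / _mm_fmadd_ps / WASM qfma where available. */
  const psimd_f32 acc = psimd_qfma_f32(a, b, c);   /* {6, 12, 18, 24} */

  /* Horizontal reductions over the four lanes. */
  printf("sum = %f\n", (double) psimd_reduce_sum_f32(acc));  /* 60 */
  printf("max = %f\n", (double) psimd_reduce_max_f32(acc));  /* 24 */
  printf("min = %f\n", (double) psimd_reduce_min_f32(acc));  /* 6 */
  return 0;
}

The blend, min/max, splat, reverse, interleave, and concat helpers shown above follow the same pattern: pure per-lane or shuffle operations on the psimd_* vector types, with no memory or state involved.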
env-llmeval/lib/python3.10/site-packages/torch/include/pthreadpool.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack.h ADDED
@@ -0,0 +1,336 @@
1
+ /*
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #pragma once
10
+
11
+ #include <stdbool.h>
12
+ #include <stddef.h>
13
+ #include <stdint.h>
14
+
15
+ #include <pthreadpool.h>
16
+
17
+ #ifdef __cplusplus
18
+ extern "C" {
19
+ #endif
20
+
21
+ /**
22
+ * @brief Status code for any QNNPACK function call.
23
+ */
24
+ enum qnnp_status {
25
+ /** The call succeeded, and all output arguments now contain valid data. */
26
+ qnnp_status_success = 0,
27
+ qnnp_status_uninitialized = 1,
28
+ qnnp_status_invalid_parameter = 2,
29
+ qnnp_status_unsupported_parameter = 3,
30
+ qnnp_status_unsupported_hardware = 4,
31
+ qnnp_status_out_of_memory = 5,
32
+ };
33
+
34
+ enum qnnp_status qnnp_initialize(void);
35
+
36
+ enum qnnp_status qnnp_deinitialize(void);
37
+
38
+ typedef struct qnnp_operator* qnnp_operator_t;
39
+
40
+ enum qnnp_status qnnp_create_convolution2d_nhwc_q8(
41
+ uint32_t input_padding_top,
42
+ uint32_t input_padding_right,
43
+ uint32_t input_padding_bottom,
44
+ uint32_t input_padding_left,
45
+ uint32_t kernel_height,
46
+ uint32_t kernel_width,
47
+ uint32_t subsampling_height,
48
+ uint32_t subsampling_width,
49
+ uint32_t dilation_height,
50
+ uint32_t dilation_width,
51
+ uint32_t groups,
52
+ size_t group_input_channels,
53
+ size_t group_output_channels,
54
+ uint8_t input_zero_point,
55
+ float input_scale,
56
+ uint8_t kernel_zero_point,
57
+ float kernel_scale,
58
+ const uint8_t* kernel,
59
+ const int32_t* bias,
60
+ uint8_t output_zero_point,
61
+ float output_scale,
62
+ uint8_t output_min,
63
+ uint8_t output_max,
64
+ uint32_t flags,
65
+ qnnp_operator_t* convolution);
66
+
67
+ enum qnnp_status qnnp_setup_convolution2d_nhwc_q8(
68
+ qnnp_operator_t convolution,
69
+ size_t batch_size,
70
+ size_t input_height,
71
+ size_t input_width,
72
+ const uint8_t* input,
73
+ size_t input_stride,
74
+ uint8_t* output,
75
+ size_t output_stride,
76
+ pthreadpool_t threadpool);
77
+
78
+ enum qnnp_status qnnp_create_deconvolution2d_nhwc_q8(
79
+ uint32_t input_padding_top,
80
+ uint32_t input_padding_right,
81
+ uint32_t input_padding_bottom,
82
+ uint32_t input_padding_left,
83
+ uint32_t adjustment_height,
84
+ uint32_t adjustment_width,
85
+ uint32_t kernel_height,
86
+ uint32_t kernel_width,
87
+ uint32_t stride_height,
88
+ uint32_t stride_width,
89
+ uint32_t dilation_height,
90
+ uint32_t dilation_width,
91
+ uint32_t groups,
92
+ size_t group_input_channels,
93
+ size_t group_output_channels,
94
+ uint8_t input_zero_point,
95
+ float input_scale,
96
+ uint8_t kernel_zero_point,
97
+ float kernel_scale,
98
+ const uint8_t* kernel,
99
+ const int32_t* bias,
100
+ uint8_t output_zero_point,
101
+ float output_scale,
102
+ uint8_t output_min,
103
+ uint8_t output_max,
104
+ uint32_t flags,
105
+ qnnp_operator_t* deconvolution);
106
+
107
+ enum qnnp_status qnnp_setup_deconvolution2d_nhwc_q8(
108
+ qnnp_operator_t deconvolution,
109
+ size_t batch_size,
110
+ size_t input_height,
111
+ size_t input_width,
112
+ const uint8_t* input,
113
+ size_t input_stride,
114
+ uint8_t* output,
115
+ size_t output_stride,
116
+ pthreadpool_t threadpool);
117
+
118
+ enum qnnp_status qnnp_create_fully_connected_nc_q8(
119
+ size_t input_channels,
120
+ size_t output_channels,
121
+ uint8_t input_zero_point,
122
+ float input_scale,
123
+ uint8_t kernel_zero_point,
124
+ float kernel_scale,
125
+ const uint8_t* kernel,
126
+ const int32_t* bias,
127
+ uint8_t output_zero_point,
128
+ float output_scale,
129
+ uint8_t output_min,
130
+ uint8_t output_max,
131
+ uint32_t flags,
132
+ qnnp_operator_t* fully_connected);
133
+
134
+ enum qnnp_status qnnp_setup_fully_connected_nc_q8(
135
+ qnnp_operator_t fully_connected,
136
+ size_t batch_size,
137
+ const uint8_t* input,
138
+ size_t input_stride,
139
+ uint8_t* output,
140
+ size_t output_stride);
141
+
142
+ enum qnnp_status qnnp_create_global_average_pooling_nwc_q8(
143
+ size_t channels,
144
+ uint8_t input_zero_point,
145
+ float input_scale,
146
+ uint8_t output_zero_point,
147
+ float output_scale,
148
+ uint8_t output_min,
149
+ uint8_t output_max,
150
+ uint32_t flags,
151
+ qnnp_operator_t* global_average_pooling);
152
+
153
+ enum qnnp_status qnnp_setup_global_average_pooling_nwc_q8(
154
+ qnnp_operator_t global_average_pooling,
155
+ size_t batch_size,
156
+ size_t width,
157
+ const uint8_t* input,
158
+ size_t input_stride,
159
+ uint8_t* output,
160
+ size_t output_stride);
161
+
162
+ enum qnnp_status qnnp_create_average_pooling2d_nhwc_q8(
163
+ uint32_t input_padding_top,
164
+ uint32_t input_padding_right,
165
+ uint32_t input_padding_bottom,
166
+ uint32_t input_padding_left,
167
+ uint32_t pooling_height,
168
+ uint32_t pooling_width,
169
+ uint32_t stride_height,
170
+ uint32_t stride_width,
171
+ size_t channels,
172
+ uint8_t input_zero_point,
173
+ float input_scale,
174
+ uint8_t output_zero_point,
175
+ float output_scale,
176
+ uint8_t output_min,
177
+ uint8_t output_max,
178
+ uint32_t flags,
179
+ qnnp_operator_t* average_pooling);
180
+
181
+ enum qnnp_status qnnp_setup_average_pooling2d_nhwc_q8(
182
+ qnnp_operator_t average_pooling,
183
+ size_t batch_size,
184
+ size_t input_height,
185
+ size_t input_width,
186
+ const uint8_t* input,
187
+ size_t input_stride,
188
+ uint8_t* output,
189
+ size_t output_stride,
190
+ pthreadpool_t threadpool);
191
+
192
+ enum qnnp_status qnnp_create_max_pooling2d_nhwc_u8(
193
+ uint32_t input_padding_top,
194
+ uint32_t input_padding_right,
195
+ uint32_t input_padding_bottom,
196
+ uint32_t input_padding_left,
197
+ uint32_t pooling_height,
198
+ uint32_t pooling_width,
199
+ uint32_t stride_height,
200
+ uint32_t stride_width,
201
+ uint32_t dilation_height,
202
+ uint32_t dilation_width,
203
+ size_t channels,
204
+ uint8_t output_min,
205
+ uint8_t output_max,
206
+ uint32_t flags,
207
+ qnnp_operator_t* max_pooling);
208
+
209
+ enum qnnp_status qnnp_setup_max_pooling2d_nhwc_u8(
210
+ qnnp_operator_t max_pooling,
211
+ size_t batch_size,
212
+ size_t input_height,
213
+ size_t input_width,
214
+ const uint8_t* input,
215
+ size_t input_stride,
216
+ uint8_t* output,
217
+ size_t output_stride,
218
+ pthreadpool_t threadpool);
219
+
220
+ enum qnnp_status qnnp_create_channel_shuffle_nc_x8(
221
+ size_t groups,
222
+ size_t group_channels,
223
+ uint32_t flags,
224
+ qnnp_operator_t* channel_shuffle);
225
+
226
+ enum qnnp_status qnnp_setup_channel_shuffle_nc_x8(
227
+ qnnp_operator_t channel_shuffle,
228
+ size_t batch_size,
229
+ const uint8_t* input,
230
+ size_t input_stride,
231
+ uint8_t* output,
232
+ size_t output_stride);
233
+
234
+ enum qnnp_status qnnp_create_add_nc_q8(
235
+ size_t channels,
236
+ uint8_t a_zero_point,
237
+ float a_scale,
238
+ uint8_t b_zero_point,
239
+ float b_scale,
240
+ uint8_t sum_zero_point,
241
+ float sum_scale,
242
+ uint8_t sum_min,
243
+ uint8_t sum_max,
244
+ uint32_t flags,
245
+ qnnp_operator_t* add);
246
+
247
+ enum qnnp_status qnnp_setup_add_nc_q8(
248
+ qnnp_operator_t add,
249
+ size_t batch_size,
250
+ const uint8_t* a,
251
+ size_t a_stride,
252
+ const uint8_t* b,
253
+ size_t b_stride,
254
+ uint8_t* sum,
255
+ size_t sum_stride);
256
+
257
+ enum qnnp_status qnnp_create_clamp_nc_u8(
258
+ size_t channels,
259
+ uint8_t output_min,
260
+ uint8_t output_max,
261
+ uint32_t flags,
262
+ qnnp_operator_t* clamp);
263
+
264
+ enum qnnp_status qnnp_setup_clamp_nc_u8(
265
+ qnnp_operator_t clamp,
266
+ size_t batch_size,
267
+ const uint8_t* input,
268
+ size_t input_stride,
269
+ uint8_t* output,
270
+ size_t output_stride);
271
+
272
+ enum qnnp_status qnnp_create_sigmoid_nc_q8(
273
+ size_t channels,
274
+ uint8_t input_zero_point,
275
+ float input_scale,
276
+ uint8_t output_zero_point,
277
+ float output_scale,
278
+ uint8_t output_min,
279
+ uint8_t output_max,
280
+ uint32_t flags,
281
+ qnnp_operator_t* sigmoid);
282
+
283
+ enum qnnp_status qnnp_setup_sigmoid_nc_q8(
284
+ qnnp_operator_t sigmoid,
285
+ size_t batch_size,
286
+ const uint8_t* input,
287
+ size_t input_stride,
288
+ uint8_t* output,
289
+ size_t output_stride);
290
+
291
+ enum qnnp_status qnnp_create_leaky_relu_nc_q8(
292
+ size_t channels,
293
+ float negative_slope,
294
+ uint8_t input_zero_point,
295
+ float input_scale,
296
+ uint8_t output_zero_point,
297
+ float output_scale,
298
+ uint8_t output_min,
299
+ uint8_t output_max,
300
+ uint32_t flags,
301
+ qnnp_operator_t* leaky_relu);
302
+
303
+ enum qnnp_status qnnp_setup_leaky_relu_nc_q8(
304
+ qnnp_operator_t leaky_relu,
305
+ size_t batch_size,
306
+ const uint8_t* input,
307
+ size_t input_stride,
308
+ uint8_t* output,
309
+ size_t output_stride);
310
+
311
+ enum qnnp_status qnnp_create_softargmax_nc_q8(
312
+ size_t channels,
313
+ float input_scale,
314
+ uint8_t output_zero_point,
315
+ float output_scale,
316
+ uint32_t flags,
317
+ qnnp_operator_t* softargmax);
318
+
319
+ enum qnnp_status qnnp_setup_softargmax_nc_q8(
320
+ qnnp_operator_t softargmax,
321
+ size_t batch_size,
322
+ const uint8_t* input,
323
+ size_t input_stride,
324
+ uint8_t* output,
325
+ size_t output_stride);
326
+
327
+ enum qnnp_status qnnp_run_operator(
328
+ qnnp_operator_t op,
329
+ pthreadpool_t threadpool);
330
+
331
+ enum qnnp_status qnnp_delete_operator(
332
+ qnnp_operator_t op);
333
+
334
+ #ifdef __cplusplus
335
+ } /* extern "C" */
336
+ #endif
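Editor's note: qnnpack.h above exposes a plain C operator API; an operator is created once with its quantization parameters, set up per batch with input/output pointers and strides, executed with qnnp_run_operator, and released with qnnp_delete_operator. Below is a minimal lifecycle sketch using the clamp operator declared above; it is an illustration rather than part of the vendored header, and the claim that a NULL pthreadpool_t yields single-threaded execution is an assumption the header does not spell out.

#include <stdint.h>
#include <stdio.h>
#include <qnnpack.h>

int main(void) {
  if (qnnp_initialize() != qnnp_status_success) {
    fprintf(stderr, "qnnp_initialize failed\n");
    return 1;
  }

  /* Clamp one row of 8 channels of u8 data into [20, 200]. */
  uint8_t input[8]  = { 0, 10, 50, 100, 150, 200, 250, 255 };
  uint8_t output[8] = { 0 };

  qnnp_operator_t clamp_op = NULL;
  if (qnnp_create_clamp_nc_u8(8 /* channels */, 20, 200, 0 /* flags */, &clamp_op) != qnnp_status_success)
    return 1;

  /* batch_size = 1; input/output strides equal the channel count for a dense layout. */
  if (qnnp_setup_clamp_nc_u8(clamp_op, 1, input, 8, output, 8) != qnnp_status_success)
    return 1;

  /* NULL threadpool: assumed to run the operator on the calling thread. */
  if (qnnp_run_operator(clamp_op, NULL) != qnnp_status_success)
    return 1;

  qnnp_delete_operator(clamp_op);
  qnnp_deinitialize();
  return 0;
}

The convolution, pooling, and fully-connected operators declared above follow the same create/setup/run/delete pattern; they only add quantization parameters (zero points, scales, output range) and spatial arguments at creation and setup time.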
env-llmeval/lib/python3.10/site-packages/torch/include/qnnpack_func.h ADDED
@@ -0,0 +1,166 @@
1
+ #pragma once
2
+
3
+ #include <cstdlib>
4
+ #include <qnnpack/operator.h>
5
+
6
+ namespace qnnpack {
7
+ class PrePackConvWeights final {
8
+ public:
9
+ PrePackConvWeights(
10
+ const pytorch_qnnp_operator_t convolution,
11
+ const uint8_t* kernel_zero_points,
12
+ const uint8_t* kernel,
13
+ const int32_t* bias);
14
+
15
+ void* getPackedWeights() const
16
+ {
17
+ return packed_weights_;
18
+ }
19
+
20
+ int64_t getOutputChannels() const
21
+ {
22
+ return output_channels_;
23
+ }
24
+
25
+ ~PrePackConvWeights()
26
+ {
27
+ if (packed_weights_ != nullptr) {
28
+ free(packed_weights_);
29
+ }
30
+ }
31
+
32
+ PrePackConvWeights() = delete;
33
+ PrePackConvWeights(const PrePackConvWeights&) = delete;
34
+ PrePackConvWeights& operator=(const PrePackConvWeights&) = delete;
35
+
36
+ private:
37
+ void* packed_weights_ = nullptr;
38
+ int64_t output_channels_;
39
+ };
40
+
41
+ class PackBMatrix final {
42
+ public:
43
+ PackBMatrix(
44
+ size_t input_channels,
45
+ size_t output_channels,
46
+ const uint8_t* kernel_zero_points,
47
+ const float* requantization_scale,
48
+ const uint8_t* kernel,
49
+ const int32_t* bias);
50
+
51
+ // This constructor is to be used for dynamic mode
52
+ // quantization. In dynamic mode, we don't yet support
53
+ // per channel quantization, and paying the cost of
54
+ // memory allocation for per channel zero point and
55
+ // requant scale will hurt performance.
56
+ PackBMatrix(
57
+ size_t input_channels,
58
+ size_t output_channels,
59
+ const uint8_t kernel_zero_point,
60
+ const float requantization_scale,
61
+ const uint8_t* kernel,
62
+ const int32_t* bias);
63
+
64
+ void* getPackedWeights() const
65
+ {
66
+ return packed_weights_;
67
+ }
68
+
69
+ void unpackWeights(
70
+ const uint8_t* kernel_zero_points,
71
+ int8_t* kernel
72
+ ) const;
73
+
74
+ size_t getInputChannels() const
75
+ {
76
+ return input_channels_;
77
+ }
78
+
79
+ size_t getOutputChannels() const
80
+ {
81
+ return output_channels_;
82
+ }
83
+
84
+ ~PackBMatrix()
85
+ {
86
+ if (packed_weights_ != nullptr) {
87
+ free(packed_weights_);
88
+ }
89
+ }
90
+
91
+ PackBMatrix() = delete;
92
+ PackBMatrix(const PackBMatrix&) = delete;
93
+ PackBMatrix& operator=(const PackBMatrix&) = delete;
94
+
95
+ private:
96
+ void* packed_weights_ = nullptr;
97
+ size_t input_channels_;
98
+ size_t output_channels_;
99
+ };
100
+
101
+ enum pytorch_qnnp_status qnnpackLinear(
102
+ const size_t batch_size,
103
+ const size_t input_channels,
104
+ const size_t output_channels,
105
+ const uint8_t input_zero_point,
106
+ const uint8_t* kernel_zero_points,
107
+ const float* requantization_scales,
108
+ const uint8_t output_zero_point,
109
+ const uint8_t output_min,
110
+ const uint8_t output_max,
111
+ const uint8_t* input,
112
+ const size_t input_stride,
113
+ void* packed_weights,
114
+ uint8_t* output,
115
+ const size_t output_stride,
116
+ pthreadpool_t threadpool);
117
+
118
+ enum pytorch_qnnp_status qnnpackConv(
119
+ const pytorch_qnnp_operator_t convolution,
120
+ void* packed_weights,
121
+ const size_t batch_size,
122
+ const size_t input_depth,
123
+ const size_t input_height,
124
+ const size_t input_width,
125
+ const uint8_t input_zero_point,
126
+ const uint8_t* input,
127
+ const uint8_t* kernel_zero_points,
128
+ const float* requantization_scales,
129
+ const uint8_t output_zero_point,
130
+ const uint8_t output_min,
131
+ const uint8_t output_max,
132
+ uint8_t* output,
133
+ pthreadpool_t threadpool);
134
+
135
+ enum pytorch_qnnp_status qnnpackDeConv(
136
+ const pytorch_qnnp_operator_t deconvolution,
137
+ void* packed_weights,
138
+ const size_t batch_size,
139
+ const size_t input_height,
140
+ const size_t input_width,
141
+ const uint8_t input_zero_point,
142
+ const uint8_t* input,
143
+ const uint8_t* kernel_zero_points,
144
+ const float* requantization_scales,
145
+ const uint8_t output_zero_point,
146
+ const uint8_t output_min,
147
+ const uint8_t output_max,
148
+ uint8_t* output,
149
+ pthreadpool_t threadpool);
150
+
151
+ enum pytorch_qnnp_status qnnpackLinearDynamic(
152
+ const size_t batch_size,
153
+ const size_t input_channels,
154
+ const size_t output_channels,
155
+ const uint8_t input_zero_point,
156
+ const uint8_t* kernel_zero_points,
157
+ const float* dequantization_scales,
158
+ const uint8_t* input,
159
+ const size_t input_stride,
160
+ void* packed_weights,
161
+ const float* bias,
162
+ float* output,
163
+ const size_t output_stride,
164
+ pthreadpool_t threadpool);
165
+
166
+ } // namespace qnnpack
env-llmeval/lib/python3.10/site-packages/torch/include/sleef.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/include/xnnpack.h ADDED
The diff for this file is too large to render. See raw diff