applied-ai-018 committed on
Commit 1fdfd93 · verified · 1 Parent(s): 20d933c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/__init__.py +0 -0
  3. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py +0 -0
  5. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h +891 -0
  7. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h +1815 -0
  8. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h +693 -0
  9. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h +0 -0
  10. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h +478 -0
  11. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h +824 -0
  12. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py +0 -0
  13. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h +64 -0
  18. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h +65 -0
  19. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h +452 -0
  20. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h +108 -0
  21. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h +174 -0
  22. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h +99 -0
  23. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h +212 -0
  24. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h +634 -0
  25. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h +338 -0
  26. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h +189 -0
  27. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h +135 -0
  28. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h +133 -0
  29. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h +429 -0
  30. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/scan.h +320 -0
  31. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/sync.h +267 -0
  32. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h +62 -0
  33. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/reduce.h +63 -0
  34. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/scan.h +63 -0
  35. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h +282 -0
  36. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h +280 -0
  37. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp +1546 -0
  38. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h +514 -0
  39. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h +201 -0
  40. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h +65 -0
  41. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h +81 -0
  42. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h +0 -0
  43. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h +65 -0
  44. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h +103 -0
  45. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h +152 -0
  46. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h +141 -0
  47. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp +588 -0
  48. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h +124 -0
  49. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_indirect_functions.h +638 -0
  50. env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h +177 -0
.gitattributes CHANGED
@@ -140,3 +140,4 @@ env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.
140
  env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.8 filter=lfs diff=lfs merge=lfs -text
141
  env-llmeval/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
142
  env-llmeval/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_train.so.8 filter=lfs diff=lfs merge=lfs -text
 
 
143
+ env-llmeval/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes)
 
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes)
 
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublas.h ADDED
@@ -0,0 +1,891 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the CUBLAS library, defining the API
52
+ *
53
+ * CUBLAS is an implementation of BLAS (Basic Linear Algebra Subroutines)
54
+ * on top of the CUDA runtime.
55
+ */
56
+
57
+ #if !defined(CUBLAS_H_)
58
+ #define CUBLAS_H_
59
+
60
+ #if defined(CUBLAS_V2_H_)
61
+ #error "It is an error to include both cublas.h and cublas_v2.h"
62
+ #endif
63
+
64
+ #include <cuda_runtime.h>
65
+
66
+ #ifndef CUBLASWINAPI
67
+ #ifdef _WIN32
68
+ #define CUBLASWINAPI __stdcall
69
+ #else
70
+ #define CUBLASWINAPI
71
+ #endif
72
+ #endif
73
+
74
+ #undef CUBLASAPI
75
+ #ifdef __CUDACC__
76
+ #define CUBLASAPI __host__
77
+ #else
78
+ #define CUBLASAPI
79
+ #endif
80
+
81
+ #include "cublas_api.h"
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif
86
+
87
+ /* CUBLAS data types */
88
+ #define cublasStatus cublasStatus_t
89
+
90
+ cublasStatus CUBLASWINAPI cublasInit(void);
91
+ cublasStatus CUBLASWINAPI cublasShutdown(void);
92
+ cublasStatus CUBLASWINAPI cublasGetError(void);
93
+
94
+ cublasStatus CUBLASWINAPI cublasGetVersion(int* version);
95
+ cublasStatus CUBLASWINAPI cublasAlloc(int n, int elemSize, void** devicePtr);
96
+
97
+ cublasStatus CUBLASWINAPI cublasFree(void* devicePtr);
98
+
99
+ cublasStatus CUBLASWINAPI cublasSetKernelStream(cudaStream_t stream);
100
+
101
+ /* ---------------- CUBLAS BLAS1 functions ---------------- */
102
+ /* NRM2 */
103
+ float CUBLASWINAPI cublasSnrm2(int n, const float* x, int incx);
104
+ double CUBLASWINAPI cublasDnrm2(int n, const double* x, int incx);
105
+ float CUBLASWINAPI cublasScnrm2(int n, const cuComplex* x, int incx);
106
+ double CUBLASWINAPI cublasDznrm2(int n, const cuDoubleComplex* x, int incx);
107
+ /*------------------------------------------------------------------------*/
108
+ /* DOT */
109
+ float CUBLASWINAPI cublasSdot(int n, const float* x, int incx, const float* y, int incy);
110
+ double CUBLASWINAPI cublasDdot(int n, const double* x, int incx, const double* y, int incy);
111
+ cuComplex CUBLASWINAPI cublasCdotu(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
112
+ cuComplex CUBLASWINAPI cublasCdotc(int n, const cuComplex* x, int incx, const cuComplex* y, int incy);
113
+ cuDoubleComplex CUBLASWINAPI cublasZdotu(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
114
+ cuDoubleComplex CUBLASWINAPI cublasZdotc(int n, const cuDoubleComplex* x, int incx, const cuDoubleComplex* y, int incy);
115
+ /*------------------------------------------------------------------------*/
116
+ /* SCAL */
117
+ void CUBLASWINAPI cublasSscal(int n, float alpha, float* x, int incx);
118
+ void CUBLASWINAPI cublasDscal(int n, double alpha, double* x, int incx);
119
+ void CUBLASWINAPI cublasCscal(int n, cuComplex alpha, cuComplex* x, int incx);
120
+ void CUBLASWINAPI cublasZscal(int n, cuDoubleComplex alpha, cuDoubleComplex* x, int incx);
121
+
122
+ void CUBLASWINAPI cublasCsscal(int n, float alpha, cuComplex* x, int incx);
123
+ void CUBLASWINAPI cublasZdscal(int n, double alpha, cuDoubleComplex* x, int incx);
124
+ /*------------------------------------------------------------------------*/
125
+ /* AXPY */
126
+ void CUBLASWINAPI cublasSaxpy(int n, float alpha, const float* x, int incx, float* y, int incy);
127
+ void CUBLASWINAPI cublasDaxpy(int n, double alpha, const double* x, int incx, double* y, int incy);
128
+ void CUBLASWINAPI cublasCaxpy(int n, cuComplex alpha, const cuComplex* x, int incx, cuComplex* y, int incy);
129
+ void CUBLASWINAPI
130
+ cublasZaxpy(int n, cuDoubleComplex alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
131
+ /*------------------------------------------------------------------------*/
132
+ /* COPY */
133
+ void CUBLASWINAPI cublasScopy(int n, const float* x, int incx, float* y, int incy);
134
+ void CUBLASWINAPI cublasDcopy(int n, const double* x, int incx, double* y, int incy);
135
+ void CUBLASWINAPI cublasCcopy(int n, const cuComplex* x, int incx, cuComplex* y, int incy);
136
+ void CUBLASWINAPI cublasZcopy(int n, const cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
137
+ /*------------------------------------------------------------------------*/
138
+ /* SWAP */
139
+ void CUBLASWINAPI cublasSswap(int n, float* x, int incx, float* y, int incy);
140
+ void CUBLASWINAPI cublasDswap(int n, double* x, int incx, double* y, int incy);
141
+ void CUBLASWINAPI cublasCswap(int n, cuComplex* x, int incx, cuComplex* y, int incy);
142
+ void CUBLASWINAPI cublasZswap(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy);
143
+ /*------------------------------------------------------------------------*/
144
+ /* AMAX */
145
+ int CUBLASWINAPI cublasIsamax(int n, const float* x, int incx);
146
+ int CUBLASWINAPI cublasIdamax(int n, const double* x, int incx);
147
+ int CUBLASWINAPI cublasIcamax(int n, const cuComplex* x, int incx);
148
+ int CUBLASWINAPI cublasIzamax(int n, const cuDoubleComplex* x, int incx);
149
+ /*------------------------------------------------------------------------*/
150
+ /* AMIN */
151
+ int CUBLASWINAPI cublasIsamin(int n, const float* x, int incx);
152
+ int CUBLASWINAPI cublasIdamin(int n, const double* x, int incx);
153
+
154
+ int CUBLASWINAPI cublasIcamin(int n, const cuComplex* x, int incx);
155
+ int CUBLASWINAPI cublasIzamin(int n, const cuDoubleComplex* x, int incx);
156
+ /*------------------------------------------------------------------------*/
157
+ /* ASUM */
158
+ float CUBLASWINAPI cublasSasum(int n, const float* x, int incx);
159
+ double CUBLASWINAPI cublasDasum(int n, const double* x, int incx);
160
+ float CUBLASWINAPI cublasScasum(int n, const cuComplex* x, int incx);
161
+ double CUBLASWINAPI cublasDzasum(int n, const cuDoubleComplex* x, int incx);
162
+ /*------------------------------------------------------------------------*/
163
+ /* ROT */
164
+ void CUBLASWINAPI cublasSrot(int n, float* x, int incx, float* y, int incy, float sc, float ss);
165
+ void CUBLASWINAPI cublasDrot(int n, double* x, int incx, double* y, int incy, double sc, double ss);
166
+ void CUBLASWINAPI cublasCrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, cuComplex s);
167
+ void CUBLASWINAPI
168
+ cublasZrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double sc, cuDoubleComplex cs);
169
+ void CUBLASWINAPI cublasCsrot(int n, cuComplex* x, int incx, cuComplex* y, int incy, float c, float s);
170
+ void CUBLASWINAPI cublasZdrot(int n, cuDoubleComplex* x, int incx, cuDoubleComplex* y, int incy, double c, double s);
171
+ /*------------------------------------------------------------------------*/
172
+ /* ROTG */
173
+ void CUBLASWINAPI cublasSrotg(float* sa, float* sb, float* sc, float* ss);
174
+ void CUBLASWINAPI cublasDrotg(double* sa, double* sb, double* sc, double* ss);
175
+ void CUBLASWINAPI cublasCrotg(cuComplex* ca, cuComplex cb, float* sc, cuComplex* cs);
176
+ void CUBLASWINAPI cublasZrotg(cuDoubleComplex* ca, cuDoubleComplex cb, double* sc, cuDoubleComplex* cs);
177
+ /*------------------------------------------------------------------------*/
178
+ /* ROTM */
179
+ void CUBLASWINAPI cublasSrotm(int n, float* x, int incx, float* y, int incy, const float* sparam);
180
+ void CUBLASWINAPI cublasDrotm(int n, double* x, int incx, double* y, int incy, const double* sparam);
181
+ /*------------------------------------------------------------------------*/
182
+ /* ROTMG */
183
+ void CUBLASWINAPI cublasSrotmg(float* sd1, float* sd2, float* sx1, const float* sy1, float* sparam);
184
+ void CUBLASWINAPI cublasDrotmg(double* sd1, double* sd2, double* sx1, const double* sy1, double* sparam);
185
+
186
+ /* --------------- CUBLAS BLAS2 functions ---------------- */
187
+ /* GEMV */
188
+ void CUBLASWINAPI cublasSgemv(char trans,
189
+ int m,
190
+ int n,
191
+ float alpha,
192
+ const float* A,
193
+ int lda,
194
+ const float* x,
195
+ int incx,
196
+ float beta,
197
+ float* y,
198
+ int incy);
199
+ void CUBLASWINAPI cublasDgemv(char trans,
200
+ int m,
201
+ int n,
202
+ double alpha,
203
+ const double* A,
204
+ int lda,
205
+ const double* x,
206
+ int incx,
207
+ double beta,
208
+ double* y,
209
+ int incy);
210
+ void CUBLASWINAPI cublasCgemv(char trans,
211
+ int m,
212
+ int n,
213
+ cuComplex alpha,
214
+ const cuComplex* A,
215
+ int lda,
216
+ const cuComplex* x,
217
+ int incx,
218
+ cuComplex beta,
219
+ cuComplex* y,
220
+ int incy);
221
+ void CUBLASWINAPI cublasZgemv(char trans,
222
+ int m,
223
+ int n,
224
+ cuDoubleComplex alpha,
225
+ const cuDoubleComplex* A,
226
+ int lda,
227
+ const cuDoubleComplex* x,
228
+ int incx,
229
+ cuDoubleComplex beta,
230
+ cuDoubleComplex* y,
231
+ int incy);
232
+ /*------------------------------------------------------------------------*/
233
+ /* GBMV */
234
+ void CUBLASWINAPI cublasSgbmv(char trans,
235
+ int m,
236
+ int n,
237
+ int kl,
238
+ int ku,
239
+ float alpha,
240
+ const float* A,
241
+ int lda,
242
+ const float* x,
243
+ int incx,
244
+ float beta,
245
+ float* y,
246
+ int incy);
247
+ void CUBLASWINAPI cublasDgbmv(char trans,
248
+ int m,
249
+ int n,
250
+ int kl,
251
+ int ku,
252
+ double alpha,
253
+ const double* A,
254
+ int lda,
255
+ const double* x,
256
+ int incx,
257
+ double beta,
258
+ double* y,
259
+ int incy);
260
+ void CUBLASWINAPI cublasCgbmv(char trans,
261
+ int m,
262
+ int n,
263
+ int kl,
264
+ int ku,
265
+ cuComplex alpha,
266
+ const cuComplex* A,
267
+ int lda,
268
+ const cuComplex* x,
269
+ int incx,
270
+ cuComplex beta,
271
+ cuComplex* y,
272
+ int incy);
273
+ void CUBLASWINAPI cublasZgbmv(char trans,
274
+ int m,
275
+ int n,
276
+ int kl,
277
+ int ku,
278
+ cuDoubleComplex alpha,
279
+ const cuDoubleComplex* A,
280
+ int lda,
281
+ const cuDoubleComplex* x,
282
+ int incx,
283
+ cuDoubleComplex beta,
284
+ cuDoubleComplex* y,
285
+ int incy);
286
+ /*------------------------------------------------------------------------*/
287
+ /* TRMV */
288
+ void CUBLASWINAPI cublasStrmv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
289
+ void CUBLASWINAPI cublasDtrmv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
290
+ void CUBLASWINAPI
291
+ cublasCtrmv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
292
+ void CUBLASWINAPI
293
+ cublasZtrmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
294
+ /*------------------------------------------------------------------------*/
295
+ /* TBMV */
296
+ void CUBLASWINAPI
297
+ cublasStbmv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
298
+ void CUBLASWINAPI
299
+ cublasDtbmv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
300
+ void CUBLASWINAPI
301
+ cublasCtbmv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
302
+ void CUBLASWINAPI cublasZtbmv(
303
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
304
+ /*------------------------------------------------------------------------*/
305
+ /* TPMV */
306
+ void CUBLASWINAPI cublasStpmv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
307
+
308
+ void CUBLASWINAPI cublasDtpmv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
309
+
310
+ void CUBLASWINAPI cublasCtpmv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
311
+
312
+ void CUBLASWINAPI
313
+ cublasZtpmv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
314
+ /*------------------------------------------------------------------------*/
315
+ /* TRSV */
316
+ void CUBLASWINAPI cublasStrsv(char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incx);
317
+
318
+ void CUBLASWINAPI cublasDtrsv(char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incx);
319
+
320
+ void CUBLASWINAPI
321
+ cublasCtrsv(char uplo, char trans, char diag, int n, const cuComplex* A, int lda, cuComplex* x, int incx);
322
+
323
+ void CUBLASWINAPI
324
+ cublasZtrsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
325
+ /*------------------------------------------------------------------------*/
326
+ /* TPSV */
327
+ void CUBLASWINAPI cublasStpsv(char uplo, char trans, char diag, int n, const float* AP, float* x, int incx);
328
+
329
+ void CUBLASWINAPI cublasDtpsv(char uplo, char trans, char diag, int n, const double* AP, double* x, int incx);
330
+
331
+ void CUBLASWINAPI cublasCtpsv(char uplo, char trans, char diag, int n, const cuComplex* AP, cuComplex* x, int incx);
332
+
333
+ void CUBLASWINAPI
334
+ cublasZtpsv(char uplo, char trans, char diag, int n, const cuDoubleComplex* AP, cuDoubleComplex* x, int incx);
335
+ /*------------------------------------------------------------------------*/
336
+ /* TBSV */
337
+ void CUBLASWINAPI
338
+ cublasStbsv(char uplo, char trans, char diag, int n, int k, const float* A, int lda, float* x, int incx);
339
+
340
+ void CUBLASWINAPI
341
+ cublasDtbsv(char uplo, char trans, char diag, int n, int k, const double* A, int lda, double* x, int incx);
342
+ void CUBLASWINAPI
343
+ cublasCtbsv(char uplo, char trans, char diag, int n, int k, const cuComplex* A, int lda, cuComplex* x, int incx);
344
+
345
+ void CUBLASWINAPI cublasZtbsv(
346
+ char uplo, char trans, char diag, int n, int k, const cuDoubleComplex* A, int lda, cuDoubleComplex* x, int incx);
347
+ /*------------------------------------------------------------------------*/
348
+ /* SYMV/HEMV */
349
+ void CUBLASWINAPI cublasSsymv(
350
+ char uplo, int n, float alpha, const float* A, int lda, const float* x, int incx, float beta, float* y, int incy);
351
+ void CUBLASWINAPI cublasDsymv(char uplo,
352
+ int n,
353
+ double alpha,
354
+ const double* A,
355
+ int lda,
356
+ const double* x,
357
+ int incx,
358
+ double beta,
359
+ double* y,
360
+ int incy);
361
+ void CUBLASWINAPI cublasChemv(char uplo,
362
+ int n,
363
+ cuComplex alpha,
364
+ const cuComplex* A,
365
+ int lda,
366
+ const cuComplex* x,
367
+ int incx,
368
+ cuComplex beta,
369
+ cuComplex* y,
370
+ int incy);
371
+ void CUBLASWINAPI cublasZhemv(char uplo,
372
+ int n,
373
+ cuDoubleComplex alpha,
374
+ const cuDoubleComplex* A,
375
+ int lda,
376
+ const cuDoubleComplex* x,
377
+ int incx,
378
+ cuDoubleComplex beta,
379
+ cuDoubleComplex* y,
380
+ int incy);
381
+ /*------------------------------------------------------------------------*/
382
+ /* SBMV/HBMV */
383
+ void CUBLASWINAPI cublasSsbmv(char uplo,
384
+ int n,
385
+ int k,
386
+ float alpha,
387
+ const float* A,
388
+ int lda,
389
+ const float* x,
390
+ int incx,
391
+ float beta,
392
+ float* y,
393
+ int incy);
394
+ void CUBLASWINAPI cublasDsbmv(char uplo,
395
+ int n,
396
+ int k,
397
+ double alpha,
398
+ const double* A,
399
+ int lda,
400
+ const double* x,
401
+ int incx,
402
+ double beta,
403
+ double* y,
404
+ int incy);
405
+ void CUBLASWINAPI cublasChbmv(char uplo,
406
+ int n,
407
+ int k,
408
+ cuComplex alpha,
409
+ const cuComplex* A,
410
+ int lda,
411
+ const cuComplex* x,
412
+ int incx,
413
+ cuComplex beta,
414
+ cuComplex* y,
415
+ int incy);
416
+ void CUBLASWINAPI cublasZhbmv(char uplo,
417
+ int n,
418
+ int k,
419
+ cuDoubleComplex alpha,
420
+ const cuDoubleComplex* A,
421
+ int lda,
422
+ const cuDoubleComplex* x,
423
+ int incx,
424
+ cuDoubleComplex beta,
425
+ cuDoubleComplex* y,
426
+ int incy);
427
+ /*------------------------------------------------------------------------*/
428
+ /* SPMV/HPMV */
429
+ void CUBLASWINAPI
430
+ cublasSspmv(char uplo, int n, float alpha, const float* AP, const float* x, int incx, float beta, float* y, int incy);
431
+ void CUBLASWINAPI cublasDspmv(
432
+ char uplo, int n, double alpha, const double* AP, const double* x, int incx, double beta, double* y, int incy);
433
+ void CUBLASWINAPI cublasChpmv(char uplo,
434
+ int n,
435
+ cuComplex alpha,
436
+ const cuComplex* AP,
437
+ const cuComplex* x,
438
+ int incx,
439
+ cuComplex beta,
440
+ cuComplex* y,
441
+ int incy);
442
+ void CUBLASWINAPI cublasZhpmv(char uplo,
443
+ int n,
444
+ cuDoubleComplex alpha,
445
+ const cuDoubleComplex* AP,
446
+ const cuDoubleComplex* x,
447
+ int incx,
448
+ cuDoubleComplex beta,
449
+ cuDoubleComplex* y,
450
+ int incy);
451
+
452
+ /*------------------------------------------------------------------------*/
453
+ /* GER */
454
+ void CUBLASWINAPI
455
+ cublasSger(int m, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
456
+ void CUBLASWINAPI
457
+ cublasDger(int m, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
458
+
459
+ void CUBLASWINAPI cublasCgeru(
460
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
461
+ void CUBLASWINAPI cublasCgerc(
462
+ int m, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* A, int lda);
463
+ void CUBLASWINAPI cublasZgeru(int m,
464
+ int n,
465
+ cuDoubleComplex alpha,
466
+ const cuDoubleComplex* x,
467
+ int incx,
468
+ const cuDoubleComplex* y,
469
+ int incy,
470
+ cuDoubleComplex* A,
471
+ int lda);
472
+ void CUBLASWINAPI cublasZgerc(int m,
473
+ int n,
474
+ cuDoubleComplex alpha,
475
+ const cuDoubleComplex* x,
476
+ int incx,
477
+ const cuDoubleComplex* y,
478
+ int incy,
479
+ cuDoubleComplex* A,
480
+ int lda);
481
+ /*------------------------------------------------------------------------*/
482
+ /* SYR/HER */
483
+ void CUBLASWINAPI cublasSsyr(char uplo, int n, float alpha, const float* x, int incx, float* A, int lda);
484
+ void CUBLASWINAPI cublasDsyr(char uplo, int n, double alpha, const double* x, int incx, double* A, int lda);
485
+
486
+ void CUBLASWINAPI cublasCher(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* A, int lda);
487
+ void CUBLASWINAPI
488
+ cublasZher(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* A, int lda);
489
+
490
+ /*------------------------------------------------------------------------*/
491
+ /* SPR/HPR */
492
+ void CUBLASWINAPI cublasSspr(char uplo, int n, float alpha, const float* x, int incx, float* AP);
493
+ void CUBLASWINAPI cublasDspr(char uplo, int n, double alpha, const double* x, int incx, double* AP);
494
+ void CUBLASWINAPI cublasChpr(char uplo, int n, float alpha, const cuComplex* x, int incx, cuComplex* AP);
495
+ void CUBLASWINAPI cublasZhpr(char uplo, int n, double alpha, const cuDoubleComplex* x, int incx, cuDoubleComplex* AP);
496
+ /*------------------------------------------------------------------------*/
497
+ /* SYR2/HER2 */
498
+ void CUBLASWINAPI
499
+ cublasSsyr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* A, int lda);
500
+ void CUBLASWINAPI
501
+ cublasDsyr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* A, int lda);
502
+ void CUBLASWINAPI cublasCher2(char uplo,
503
+ int n,
504
+ cuComplex alpha,
505
+ const cuComplex* x,
506
+ int incx,
507
+ const cuComplex* y,
508
+ int incy,
509
+ cuComplex* A,
510
+ int lda);
511
+ void CUBLASWINAPI cublasZher2(char uplo,
512
+ int n,
513
+ cuDoubleComplex alpha,
514
+ const cuDoubleComplex* x,
515
+ int incx,
516
+ const cuDoubleComplex* y,
517
+ int incy,
518
+ cuDoubleComplex* A,
519
+ int lda);
520
+
521
+ /*------------------------------------------------------------------------*/
522
+ /* SPR2/HPR2 */
523
+ void CUBLASWINAPI
524
+ cublasSspr2(char uplo, int n, float alpha, const float* x, int incx, const float* y, int incy, float* AP);
525
+ void CUBLASWINAPI
526
+ cublasDspr2(char uplo, int n, double alpha, const double* x, int incx, const double* y, int incy, double* AP);
527
+ void CUBLASWINAPI cublasChpr2(
528
+ char uplo, int n, cuComplex alpha, const cuComplex* x, int incx, const cuComplex* y, int incy, cuComplex* AP);
529
+ void CUBLASWINAPI cublasZhpr2(char uplo,
530
+ int n,
531
+ cuDoubleComplex alpha,
532
+ const cuDoubleComplex* x,
533
+ int incx,
534
+ const cuDoubleComplex* y,
535
+ int incy,
536
+ cuDoubleComplex* AP);
537
+ /* ------------------------BLAS3 Functions ------------------------------- */
538
+ /* GEMM */
539
+ void CUBLASWINAPI cublasSgemm(char transa,
540
+ char transb,
541
+ int m,
542
+ int n,
543
+ int k,
544
+ float alpha,
545
+ const float* A,
546
+ int lda,
547
+ const float* B,
548
+ int ldb,
549
+ float beta,
550
+ float* C,
551
+ int ldc);
552
+ void CUBLASWINAPI cublasDgemm(char transa,
553
+ char transb,
554
+ int m,
555
+ int n,
556
+ int k,
557
+ double alpha,
558
+ const double* A,
559
+ int lda,
560
+ const double* B,
561
+ int ldb,
562
+ double beta,
563
+ double* C,
564
+ int ldc);
565
+ void CUBLASWINAPI cublasCgemm(char transa,
566
+ char transb,
567
+ int m,
568
+ int n,
569
+ int k,
570
+ cuComplex alpha,
571
+ const cuComplex* A,
572
+ int lda,
573
+ const cuComplex* B,
574
+ int ldb,
575
+ cuComplex beta,
576
+ cuComplex* C,
577
+ int ldc);
578
+ void CUBLASWINAPI cublasZgemm(char transa,
579
+ char transb,
580
+ int m,
581
+ int n,
582
+ int k,
583
+ cuDoubleComplex alpha,
584
+ const cuDoubleComplex* A,
585
+ int lda,
586
+ const cuDoubleComplex* B,
587
+ int ldb,
588
+ cuDoubleComplex beta,
589
+ cuDoubleComplex* C,
590
+ int ldc);
591
+ /* -------------------------------------------------------*/
592
+ /* SYRK */
593
+ void CUBLASWINAPI
594
+ cublasSsyrk(char uplo, char trans, int n, int k, float alpha, const float* A, int lda, float beta, float* C, int ldc);
595
+ void CUBLASWINAPI cublasDsyrk(
596
+ char uplo, char trans, int n, int k, double alpha, const double* A, int lda, double beta, double* C, int ldc);
597
+
598
+ void CUBLASWINAPI cublasCsyrk(char uplo,
599
+ char trans,
600
+ int n,
601
+ int k,
602
+ cuComplex alpha,
603
+ const cuComplex* A,
604
+ int lda,
605
+ cuComplex beta,
606
+ cuComplex* C,
607
+ int ldc);
608
+ void CUBLASWINAPI cublasZsyrk(char uplo,
609
+ char trans,
610
+ int n,
611
+ int k,
612
+ cuDoubleComplex alpha,
613
+ const cuDoubleComplex* A,
614
+ int lda,
615
+ cuDoubleComplex beta,
616
+ cuDoubleComplex* C,
617
+ int ldc);
618
+ /* ------------------------------------------------------- */
619
+ /* HERK */
620
+ void CUBLASWINAPI cublasCherk(
621
+ char uplo, char trans, int n, int k, float alpha, const cuComplex* A, int lda, float beta, cuComplex* C, int ldc);
622
+ void CUBLASWINAPI cublasZherk(char uplo,
623
+ char trans,
624
+ int n,
625
+ int k,
626
+ double alpha,
627
+ const cuDoubleComplex* A,
628
+ int lda,
629
+ double beta,
630
+ cuDoubleComplex* C,
631
+ int ldc);
632
+ /* ------------------------------------------------------- */
633
+ /* SYR2K */
634
+ void CUBLASWINAPI cublasSsyr2k(char uplo,
635
+ char trans,
636
+ int n,
637
+ int k,
638
+ float alpha,
639
+ const float* A,
640
+ int lda,
641
+ const float* B,
642
+ int ldb,
643
+ float beta,
644
+ float* C,
645
+ int ldc);
646
+
647
+ void CUBLASWINAPI cublasDsyr2k(char uplo,
648
+ char trans,
649
+ int n,
650
+ int k,
651
+ double alpha,
652
+ const double* A,
653
+ int lda,
654
+ const double* B,
655
+ int ldb,
656
+ double beta,
657
+ double* C,
658
+ int ldc);
659
+ void CUBLASWINAPI cublasCsyr2k(char uplo,
660
+ char trans,
661
+ int n,
662
+ int k,
663
+ cuComplex alpha,
664
+ const cuComplex* A,
665
+ int lda,
666
+ const cuComplex* B,
667
+ int ldb,
668
+ cuComplex beta,
669
+ cuComplex* C,
670
+ int ldc);
671
+
672
+ void CUBLASWINAPI cublasZsyr2k(char uplo,
673
+ char trans,
674
+ int n,
675
+ int k,
676
+ cuDoubleComplex alpha,
677
+ const cuDoubleComplex* A,
678
+ int lda,
679
+ const cuDoubleComplex* B,
680
+ int ldb,
681
+ cuDoubleComplex beta,
682
+ cuDoubleComplex* C,
683
+ int ldc);
684
+ /* ------------------------------------------------------- */
685
+ /* HER2K */
686
+ void CUBLASWINAPI cublasCher2k(char uplo,
687
+ char trans,
688
+ int n,
689
+ int k,
690
+ cuComplex alpha,
691
+ const cuComplex* A,
692
+ int lda,
693
+ const cuComplex* B,
694
+ int ldb,
695
+ float beta,
696
+ cuComplex* C,
697
+ int ldc);
698
+
699
+ void CUBLASWINAPI cublasZher2k(char uplo,
700
+ char trans,
701
+ int n,
702
+ int k,
703
+ cuDoubleComplex alpha,
704
+ const cuDoubleComplex* A,
705
+ int lda,
706
+ const cuDoubleComplex* B,
707
+ int ldb,
708
+ double beta,
709
+ cuDoubleComplex* C,
710
+ int ldc);
711
+
712
+ /*------------------------------------------------------------------------*/
713
+ /* SYMM*/
714
+ void CUBLASWINAPI cublasSsymm(char side,
715
+ char uplo,
716
+ int m,
717
+ int n,
718
+ float alpha,
719
+ const float* A,
720
+ int lda,
721
+ const float* B,
722
+ int ldb,
723
+ float beta,
724
+ float* C,
725
+ int ldc);
726
+ void CUBLASWINAPI cublasDsymm(char side,
727
+ char uplo,
728
+ int m,
729
+ int n,
730
+ double alpha,
731
+ const double* A,
732
+ int lda,
733
+ const double* B,
734
+ int ldb,
735
+ double beta,
736
+ double* C,
737
+ int ldc);
738
+
739
+ void CUBLASWINAPI cublasCsymm(char side,
740
+ char uplo,
741
+ int m,
742
+ int n,
743
+ cuComplex alpha,
744
+ const cuComplex* A,
745
+ int lda,
746
+ const cuComplex* B,
747
+ int ldb,
748
+ cuComplex beta,
749
+ cuComplex* C,
750
+ int ldc);
751
+
752
+ void CUBLASWINAPI cublasZsymm(char side,
753
+ char uplo,
754
+ int m,
755
+ int n,
756
+ cuDoubleComplex alpha,
757
+ const cuDoubleComplex* A,
758
+ int lda,
759
+ const cuDoubleComplex* B,
760
+ int ldb,
761
+ cuDoubleComplex beta,
762
+ cuDoubleComplex* C,
763
+ int ldc);
764
+ /*------------------------------------------------------------------------*/
765
+ /* HEMM*/
766
+ void CUBLASWINAPI cublasChemm(char side,
767
+ char uplo,
768
+ int m,
769
+ int n,
770
+ cuComplex alpha,
771
+ const cuComplex* A,
772
+ int lda,
773
+ const cuComplex* B,
774
+ int ldb,
775
+ cuComplex beta,
776
+ cuComplex* C,
777
+ int ldc);
778
+ void CUBLASWINAPI cublasZhemm(char side,
779
+ char uplo,
780
+ int m,
781
+ int n,
782
+ cuDoubleComplex alpha,
783
+ const cuDoubleComplex* A,
784
+ int lda,
785
+ const cuDoubleComplex* B,
786
+ int ldb,
787
+ cuDoubleComplex beta,
788
+ cuDoubleComplex* C,
789
+ int ldc);
790
+
791
+ /*------------------------------------------------------------------------*/
792
+ /* TRSM*/
793
+ void CUBLASWINAPI cublasStrsm(char side,
794
+ char uplo,
795
+ char transa,
796
+ char diag,
797
+ int m,
798
+ int n,
799
+ float alpha,
800
+ const float* A,
801
+ int lda,
802
+ float* B,
803
+ int ldb);
804
+
805
+ void CUBLASWINAPI cublasDtrsm(char side,
806
+ char uplo,
807
+ char transa,
808
+ char diag,
809
+ int m,
810
+ int n,
811
+ double alpha,
812
+ const double* A,
813
+ int lda,
814
+ double* B,
815
+ int ldb);
816
+
817
+ void CUBLASWINAPI cublasCtrsm(char side,
818
+ char uplo,
819
+ char transa,
820
+ char diag,
821
+ int m,
822
+ int n,
823
+ cuComplex alpha,
824
+ const cuComplex* A,
825
+ int lda,
826
+ cuComplex* B,
827
+ int ldb);
828
+
829
+ void CUBLASWINAPI cublasZtrsm(char side,
830
+ char uplo,
831
+ char transa,
832
+ char diag,
833
+ int m,
834
+ int n,
835
+ cuDoubleComplex alpha,
836
+ const cuDoubleComplex* A,
837
+ int lda,
838
+ cuDoubleComplex* B,
839
+ int ldb);
840
+ /*------------------------------------------------------------------------*/
841
+ /* TRMM*/
842
+ void CUBLASWINAPI cublasStrmm(char side,
843
+ char uplo,
844
+ char transa,
845
+ char diag,
846
+ int m,
847
+ int n,
848
+ float alpha,
849
+ const float* A,
850
+ int lda,
851
+ float* B,
852
+ int ldb);
853
+ void CUBLASWINAPI cublasDtrmm(char side,
854
+ char uplo,
855
+ char transa,
856
+ char diag,
857
+ int m,
858
+ int n,
859
+ double alpha,
860
+ const double* A,
861
+ int lda,
862
+ double* B,
863
+ int ldb);
864
+ void CUBLASWINAPI cublasCtrmm(char side,
865
+ char uplo,
866
+ char transa,
867
+ char diag,
868
+ int m,
869
+ int n,
870
+ cuComplex alpha,
871
+ const cuComplex* A,
872
+ int lda,
873
+ cuComplex* B,
874
+ int ldb);
875
+ void CUBLASWINAPI cublasZtrmm(char side,
876
+ char uplo,
877
+ char transa,
878
+ char diag,
879
+ int m,
880
+ int n,
881
+ cuDoubleComplex alpha,
882
+ const cuDoubleComplex* A,
883
+ int lda,
884
+ cuDoubleComplex* B,
885
+ int ldb);
886
+
887
+ #if defined(__cplusplus)
888
+ }
889
+ #endif /* __cplusplus */
890
+
891
+ #endif /* !defined(CUBLAS_H_) */
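
For orientation only (not part of the committed file): a minimal, hypothetical sketch of how the legacy cuBLAS API declared in cublas.h above is typically used — cublasInit/cublasShutdown for library setup, cublasAlloc/cublasFree for device buffers, cublasSgemm for the multiply, and the out-of-band cublasGetError for error checking. It assumes a CUDA toolkit install and linking against libcublas (e.g. nvcc sgemm_legacy.c -lcublas); the file name and matrix values are illustrative.

/* Hypothetical example, not part of this commit: C = A * B with the
 * legacy cuBLAS API declared in cublas.h above. Matrices are stored
 * column-major, as BLAS expects. */
#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas.h>

int main(void) {
    const int n = 2;
    float hA[4] = {1, 2, 3, 4};   /* column-major 2x2: A = [1 3; 2 4] */
    float hB[4] = {5, 6, 7, 8};   /* B = [5 7; 6 8] */
    float hC[4] = {0, 0, 0, 0};
    float *dA, *dB, *dC;

    if (cublasInit() != CUBLAS_STATUS_SUCCESS) return 1;

    /* cublasAlloc takes an element count and element size, per the header above */
    cublasAlloc(n * n, sizeof(float), (void**)&dA);
    cublasAlloc(n * n, sizeof(float), (void**)&dB);
    cublasAlloc(n * n, sizeof(float), (void**)&dC);

    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

    /* C = 1.0 * A * B + 0.0 * C; 'n' means no transpose */
    cublasSgemm('n', 'n', n, n, n, 1.0f, dA, n, dB, n, 0.0f, dC, n);
    if (cublasGetError() != CUBLAS_STATUS_SUCCESS) return 1;  /* legacy API reports errors out of band */

    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("C = [%g %g; %g %g]\n", hC[0], hC[2], hC[1], hC[3]);

    cublasFree(dA); cublasFree(dB); cublasFree(dC);
    cublasShutdown();
    return 0;
}

This legacy, error-out-of-band interface is superseded by the handle-based API in cublas_v2.h, which is also shipped in this package (file 10 in the list above); the two headers cannot be included together, as the #error guard at the top of cublas.h enforces.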
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublasLt.h ADDED
@@ -0,0 +1,1815 @@
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+ #pragma once
50
+
51
+ #ifndef CUBLASAPI
52
+ #ifdef __CUDACC__
53
+ #define CUBLASAPI __host__ __device__
54
+ #else
55
+ #define CUBLASAPI
56
+ #endif
57
+ #endif
58
+
59
+ #include <cublas_api.h>
60
+
61
+ #include <stdint.h>
62
+ #include <stddef.h>
63
+ #include <stdio.h>
64
+
65
+ #if defined(__cplusplus)
66
+ extern "C" {
67
+ #endif /* __cplusplus */
68
+
69
+ /** Opaque structure holding CUBLASLT context
70
+ */
71
+ typedef struct cublasLtContext* cublasLtHandle_t;
72
+
73
+ cublasStatus_t CUBLASWINAPI cublasLtCreate(cublasLtHandle_t* lightHandle);
74
+
75
+ cublasStatus_t CUBLASWINAPI cublasLtDestroy(cublasLtHandle_t lightHandle);
76
+
77
+ const char* CUBLASWINAPI cublasLtGetStatusName(cublasStatus_t status);
78
+
79
+ const char* CUBLASWINAPI cublasLtGetStatusString(cublasStatus_t status);
80
+
81
+ size_t CUBLASWINAPI cublasLtGetVersion(void);
82
+
83
+ size_t CUBLASWINAPI cublasLtGetCudartVersion(void);
84
+
85
+ cublasStatus_t CUBLASWINAPI cublasLtGetProperty(libraryPropertyType type, int* value);
86
+
87
+ cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheGetCapacity(size_t* capacity);
88
+ cublasStatus_t CUBLASWINAPI cublasLtHeuristicsCacheSetCapacity(size_t capacity);
89
+
90
+ /** Restricts usage of CPU instructions (ISA) specified by the flags in the mask.
91
+ *
92
+ * Flags can be combined with bitwise OR(|) operator. Supported flags:
93
+ * - 0x1 -- x86-64 AVX512 ISA
94
+ *
95
+ * Default mask: 0 (any applicable ISA is allowed).
96
+ *
97
+ * The function returns the previous value of the mask.
98
+ * The function takes precedence over the environment variable CUBLASLT_DISABLE_CPU_INSTRUCTIONS_MASK.
99
+ */
100
+ unsigned CUBLASWINAPI cublasLtDisableCpuInstructionsSetMask(unsigned mask);
101
+
102
+ /** Semi-opaque descriptor for matrix memory layout
103
+ */
104
+ typedef struct {
105
+ uint64_t data[8];
106
+ } cublasLtMatrixLayoutOpaque_t;
107
+
108
+ /** Opaque descriptor for matrix memory layout
109
+ */
110
+ typedef cublasLtMatrixLayoutOpaque_t* cublasLtMatrixLayout_t;
111
+
112
+ /** Semi-opaque algorithm descriptor (to avoid complicated alloc/free schemes)
113
+ *
114
+ * This structure can be trivially serialized and later restored for use with the same version of cuBLAS library to save
115
+ * on selecting the right configuration again.
116
+ */
117
+ typedef struct {
118
+ uint64_t data[8];
119
+ } cublasLtMatmulAlgo_t;
120
+
121
+ /** Semi-opaque descriptor for cublasLtMatmul() operation details
122
+ */
123
+ typedef struct {
124
+ uint64_t data[23];
125
+ } cublasLtMatmulDescOpaque_t;
126
+
127
+ /** Opaque descriptor for cublasLtMatmul() operation details
128
+ */
129
+ typedef cublasLtMatmulDescOpaque_t* cublasLtMatmulDesc_t;
130
+
131
+ /** Semi-opaque descriptor for cublasLtMatrixTransform() operation details
132
+ */
133
+ typedef struct {
134
+ uint64_t data[8];
135
+ } cublasLtMatrixTransformDescOpaque_t;
136
+
137
+ /** Opaque descriptor for cublasLtMatrixTransform() operation details
138
+ */
139
+ typedef cublasLtMatrixTransformDescOpaque_t* cublasLtMatrixTransformDesc_t;
140
+
141
+ /** Semi-opaque descriptor for cublasLtMatmulPreference() operation details
142
+ */
143
+ typedef struct {
144
+ uint64_t data[8];
145
+ } cublasLtMatmulPreferenceOpaque_t;
146
+
147
+ /** Opaque descriptor for cublasLtMatmulAlgoGetHeuristic() configuration
148
+ */
149
+ typedef cublasLtMatmulPreferenceOpaque_t* cublasLtMatmulPreference_t;
150
+
151
+ /** Tile size (in C/D matrix Rows x Cols)
152
+ *
153
+ * General order of tile IDs is sorted by size first and by first dimension second.
154
+ */
155
+ typedef enum {
156
+ CUBLASLT_MATMUL_TILE_UNDEFINED = 0,
157
+ CUBLASLT_MATMUL_TILE_8x8 = 1,
158
+ CUBLASLT_MATMUL_TILE_8x16 = 2,
159
+ CUBLASLT_MATMUL_TILE_16x8 = 3,
160
+ CUBLASLT_MATMUL_TILE_8x32 = 4,
161
+ CUBLASLT_MATMUL_TILE_16x16 = 5,
162
+ CUBLASLT_MATMUL_TILE_32x8 = 6,
163
+ CUBLASLT_MATMUL_TILE_8x64 = 7,
164
+ CUBLASLT_MATMUL_TILE_16x32 = 8,
165
+ CUBLASLT_MATMUL_TILE_32x16 = 9,
166
+ CUBLASLT_MATMUL_TILE_64x8 = 10,
167
+ CUBLASLT_MATMUL_TILE_32x32 = 11,
168
+ CUBLASLT_MATMUL_TILE_32x64 = 12,
169
+ CUBLASLT_MATMUL_TILE_64x32 = 13,
170
+ CUBLASLT_MATMUL_TILE_32x128 = 14,
171
+ CUBLASLT_MATMUL_TILE_64x64 = 15,
172
+ CUBLASLT_MATMUL_TILE_128x32 = 16,
173
+ CUBLASLT_MATMUL_TILE_64x128 = 17,
174
+ CUBLASLT_MATMUL_TILE_128x64 = 18,
175
+ CUBLASLT_MATMUL_TILE_64x256 = 19,
176
+ CUBLASLT_MATMUL_TILE_128x128 = 20,
177
+ CUBLASLT_MATMUL_TILE_256x64 = 21,
178
+ CUBLASLT_MATMUL_TILE_64x512 = 22,
179
+ CUBLASLT_MATMUL_TILE_128x256 = 23,
180
+ CUBLASLT_MATMUL_TILE_256x128 = 24,
181
+ CUBLASLT_MATMUL_TILE_512x64 = 25,
182
+ CUBLASLT_MATMUL_TILE_64x96 = 26,
183
+ CUBLASLT_MATMUL_TILE_96x64 = 27,
184
+ CUBLASLT_MATMUL_TILE_96x128 = 28,
185
+ CUBLASLT_MATMUL_TILE_128x160 = 29,
186
+ CUBLASLT_MATMUL_TILE_160x128 = 30,
187
+ CUBLASLT_MATMUL_TILE_192x128 = 31,
188
+ CUBLASLT_MATMUL_TILE_128x192 = 32,
189
+ CUBLASLT_MATMUL_TILE_128x96 = 33,
190
+ CUBLASLT_MATMUL_TILE_32x256 = 34,
191
+ CUBLASLT_MATMUL_TILE_256x32 = 35,
192
+ CUBLASLT_MATMUL_TILE_END
193
+ } cublasLtMatmulTile_t;
194
+
195
+ /** Size and number of stages in which elements are read into shared memory
196
+ *
197
+ * General order of stages IDs is sorted by stage size first and by number of stages second.
198
+ */
199
+ typedef enum {
200
+ CUBLASLT_MATMUL_STAGES_UNDEFINED = 0,
201
+ CUBLASLT_MATMUL_STAGES_16x1 = 1,
202
+ CUBLASLT_MATMUL_STAGES_16x2 = 2,
203
+ CUBLASLT_MATMUL_STAGES_16x3 = 3,
204
+ CUBLASLT_MATMUL_STAGES_16x4 = 4,
205
+ CUBLASLT_MATMUL_STAGES_16x5 = 5,
206
+ CUBLASLT_MATMUL_STAGES_16x6 = 6,
207
+ CUBLASLT_MATMUL_STAGES_32x1 = 7,
208
+ CUBLASLT_MATMUL_STAGES_32x2 = 8,
209
+ CUBLASLT_MATMUL_STAGES_32x3 = 9,
210
+ CUBLASLT_MATMUL_STAGES_32x4 = 10,
211
+ CUBLASLT_MATMUL_STAGES_32x5 = 11,
212
+ CUBLASLT_MATMUL_STAGES_32x6 = 12,
213
+ CUBLASLT_MATMUL_STAGES_64x1 = 13,
214
+ CUBLASLT_MATMUL_STAGES_64x2 = 14,
215
+ CUBLASLT_MATMUL_STAGES_64x3 = 15,
216
+ CUBLASLT_MATMUL_STAGES_64x4 = 16,
217
+ CUBLASLT_MATMUL_STAGES_64x5 = 17,
218
+ CUBLASLT_MATMUL_STAGES_64x6 = 18,
219
+ CUBLASLT_MATMUL_STAGES_128x1 = 19,
220
+ CUBLASLT_MATMUL_STAGES_128x2 = 20,
221
+ CUBLASLT_MATMUL_STAGES_128x3 = 21,
222
+ CUBLASLT_MATMUL_STAGES_128x4 = 22,
223
+ CUBLASLT_MATMUL_STAGES_128x5 = 23,
224
+ CUBLASLT_MATMUL_STAGES_128x6 = 24,
225
+ CUBLASLT_MATMUL_STAGES_32x10 = 25,
226
+ CUBLASLT_MATMUL_STAGES_8x4 = 26,
227
+ CUBLASLT_MATMUL_STAGES_16x10 = 27,
228
+ CUBLASLT_MATMUL_STAGES_8x5 = 28,
229
+ CUBLASLT_MATMUL_STAGES_8x3 = 31,
230
+ CUBLASLT_MATMUL_STAGES_8xAUTO = 32,
231
+ CUBLASLT_MATMUL_STAGES_16xAUTO = 33,
232
+ CUBLASLT_MATMUL_STAGES_32xAUTO = 34,
233
+ CUBLASLT_MATMUL_STAGES_64xAUTO = 35,
234
+ CUBLASLT_MATMUL_STAGES_128xAUTO = 36,
235
+ CUBLASLT_MATMUL_STAGES_END
236
+ } cublasLtMatmulStages_t;
237
+
238
+ /** Thread Block Cluster size
239
+ *
240
+ * Typically dimensioned similar to cublasLtMatmulTile_t, with the third coordinate unused at this time.
241
+ */
242
+ typedef enum {
243
+ /** Let library pick cluster shape automatically */
244
+ CUBLASLT_CLUSTER_SHAPE_AUTO = 0,
245
+ CUBLASLT_CLUSTER_SHAPE_1x1x1 = 2,
246
+ CUBLASLT_CLUSTER_SHAPE_2x1x1 = 3,
247
+ CUBLASLT_CLUSTER_SHAPE_4x1x1 = 4,
248
+ CUBLASLT_CLUSTER_SHAPE_1x2x1 = 5,
249
+ CUBLASLT_CLUSTER_SHAPE_2x2x1 = 6,
250
+ CUBLASLT_CLUSTER_SHAPE_4x2x1 = 7,
251
+ CUBLASLT_CLUSTER_SHAPE_1x4x1 = 8,
252
+ CUBLASLT_CLUSTER_SHAPE_2x4x1 = 9,
253
+ CUBLASLT_CLUSTER_SHAPE_4x4x1 = 10,
254
+ CUBLASLT_CLUSTER_SHAPE_8x1x1 = 11,
255
+ CUBLASLT_CLUSTER_SHAPE_1x8x1 = 12,
256
+ CUBLASLT_CLUSTER_SHAPE_8x2x1 = 13,
257
+ CUBLASLT_CLUSTER_SHAPE_2x8x1 = 14,
258
+ CUBLASLT_CLUSTER_SHAPE_16x1x1 = 15,
259
+ CUBLASLT_CLUSTER_SHAPE_1x16x1 = 16,
260
+ CUBLASLT_CLUSTER_SHAPE_3x1x1 = 17,
261
+ CUBLASLT_CLUSTER_SHAPE_5x1x1 = 18,
262
+ CUBLASLT_CLUSTER_SHAPE_6x1x1 = 19,
263
+ CUBLASLT_CLUSTER_SHAPE_7x1x1 = 20,
264
+ CUBLASLT_CLUSTER_SHAPE_9x1x1 = 21,
265
+ CUBLASLT_CLUSTER_SHAPE_10x1x1 = 22,
266
+ CUBLASLT_CLUSTER_SHAPE_11x1x1 = 23,
267
+ CUBLASLT_CLUSTER_SHAPE_12x1x1 = 24,
268
+ CUBLASLT_CLUSTER_SHAPE_13x1x1 = 25,
269
+ CUBLASLT_CLUSTER_SHAPE_14x1x1 = 26,
270
+ CUBLASLT_CLUSTER_SHAPE_15x1x1 = 27,
271
+ CUBLASLT_CLUSTER_SHAPE_3x2x1 = 28,
272
+ CUBLASLT_CLUSTER_SHAPE_5x2x1 = 29,
273
+ CUBLASLT_CLUSTER_SHAPE_6x2x1 = 30,
274
+ CUBLASLT_CLUSTER_SHAPE_7x2x1 = 31,
275
+ CUBLASLT_CLUSTER_SHAPE_1x3x1 = 32,
276
+ CUBLASLT_CLUSTER_SHAPE_2x3x1 = 33,
277
+ CUBLASLT_CLUSTER_SHAPE_3x3x1 = 34,
278
+ CUBLASLT_CLUSTER_SHAPE_4x3x1 = 35,
279
+ CUBLASLT_CLUSTER_SHAPE_5x3x1 = 36,
280
+ CUBLASLT_CLUSTER_SHAPE_3x4x1 = 37,
281
+ CUBLASLT_CLUSTER_SHAPE_1x5x1 = 38,
282
+ CUBLASLT_CLUSTER_SHAPE_2x5x1 = 39,
283
+ CUBLASLT_CLUSTER_SHAPE_3x5x1 = 40,
284
+ CUBLASLT_CLUSTER_SHAPE_1x6x1 = 41,
285
+ CUBLASLT_CLUSTER_SHAPE_2x6x1 = 42,
286
+ CUBLASLT_CLUSTER_SHAPE_1x7x1 = 43,
287
+ CUBLASLT_CLUSTER_SHAPE_2x7x1 = 44,
288
+ CUBLASLT_CLUSTER_SHAPE_1x9x1 = 45,
289
+ CUBLASLT_CLUSTER_SHAPE_1x10x1 = 46,
290
+ CUBLASLT_CLUSTER_SHAPE_1x11x1 = 47,
291
+ CUBLASLT_CLUSTER_SHAPE_1x12x1 = 48,
292
+ CUBLASLT_CLUSTER_SHAPE_1x13x1 = 49,
293
+ CUBLASLT_CLUSTER_SHAPE_1x14x1 = 50,
294
+ CUBLASLT_CLUSTER_SHAPE_1x15x1 = 51,
295
+ CUBLASLT_CLUSTER_SHAPE_END
296
+ } cublasLtClusterShape_t;
297
+
298
+ /** Inner size of the kernel
299
+ *
300
+ * Represents various aspects of internal kernel design, that don't impact CUDA grid size but may have other more subtle
301
+ * effects.
302
+ *
303
+ */
304
+ typedef enum {
305
+ CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED = 0,
306
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA884 = 1,
307
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA1684 = 2,
308
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA1688 = 3,
309
+ CUBLASLT_MATMUL_INNER_SHAPE_MMA16816 = 4,
310
+ CUBLASLT_MATMUL_INNER_SHAPE_END
311
+ } cublasLtMatmulInnerShape_t;
312
+
313
+ /** Pointer mode to use for alpha/beta */
314
+ typedef enum {
315
+ /** matches CUBLAS_POINTER_MODE_HOST, pointer targets a single value host memory */
316
+ CUBLASLT_POINTER_MODE_HOST = CUBLAS_POINTER_MODE_HOST,
317
+ /** matches CUBLAS_POINTER_MODE_DEVICE, pointer targets a single value device memory */
318
+ CUBLASLT_POINTER_MODE_DEVICE = CUBLAS_POINTER_MODE_DEVICE,
319
+ /** pointer targets an array in device memory */
320
+ CUBLASLT_POINTER_MODE_DEVICE_VECTOR = 2,
321
+ /** alpha pointer targets an array in device memory, beta is zero. Note:
322
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE is not supported, must be 0. */
323
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO = 3,
324
+ /** alpha pointer targets an array in device memory, beta is a single value in host memory. */
325
+ CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST = 4,
326
+ } cublasLtPointerMode_t;
327
+
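As a brief illustrative aside, the sketch below shows how one of these pointer modes might typically be selected through the CUBLASLT_MATMUL_DESC_POINTER_MODE attribute defined later in this header; the helper name and the assumption that `matmulDesc` was created with cublasLtMatmulDescCreate() are hypothetical, and error handling is omitted.

#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: alpha is read as a device-memory
 * vector (one value per output row) while beta stays a single host-side scalar. */
static cublasStatus_t use_device_alpha_vector(cublasLtMatmulDesc_t matmulDesc) {
  cublasLtPointerMode_t mode = CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST;
  return cublasLtMatmulDescSetAttribute(
      matmulDesc, CUBLASLT_MATMUL_DESC_POINTER_MODE, &mode, sizeof(mode));
}

With this mode, the alpha argument later passed to cublasLtMatmul() would be a device pointer to the per-row vector rather than a host scalar.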
328
+ /** Mask to define pointer mode capability */
329
+ typedef enum {
330
+ /** see CUBLASLT_POINTER_MODE_HOST */
331
+ CUBLASLT_POINTER_MODE_MASK_HOST = 1,
332
+ /** see CUBLASLT_POINTER_MODE_DEVICE */
333
+ CUBLASLT_POINTER_MODE_MASK_DEVICE = 2,
334
+ /** see CUBLASLT_POINTER_MODE_DEVICE_VECTOR */
335
+ CUBLASLT_POINTER_MODE_MASK_DEVICE_VECTOR = 4,
336
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO */
337
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_ZERO = 8,
338
+ /** see CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST */
339
+ CUBLASLT_POINTER_MODE_MASK_ALPHA_DEVICE_VECTOR_BETA_HOST = 16,
340
+ } cublasLtPointerModeMask_t;
341
+
342
+ /** Implementation details that may affect numerical behavior of algorithms. */
343
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA (0x01ull << 0)
344
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_HMMA (0x02ull << 0)
345
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_IMMA (0x04ull << 0)
346
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_DMMA (0x08ull << 0)
347
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_TENSOR_OP_MASK (0xfeull << 0)
348
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_TYPE_MASK (0xffull << 0)
349
+
350
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_16F (0x01ull << 8)
351
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F (0x02ull << 8)
352
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_64F (0x04ull << 8)
353
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32I (0x08ull << 8)
354
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_TYPE_MASK (0xffull << 8)
355
+
356
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16F (0x01ull << 16)
357
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_16BF (0x02ull << 16)
358
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_TF32 (0x04ull << 16)
359
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F (0x08ull << 16)
360
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_64F (0x10ull << 16)
361
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8I (0x20ull << 16)
362
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E4M3 (0x40ull << 16)
363
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_8F_E5M2 (0x80ull << 16)
364
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_OP_INPUT_TYPE_MASK (0xffull << 16)
365
+
366
+ #define CUBLASLT_NUMERICAL_IMPL_FLAGS_GAUSSIAN (0x01ull << 32)
367
+ typedef uint64_t cublasLtNumericalImplFlags_t;
368
+
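To make the flag semantics concrete, here is a minimal sketch, assuming a valid preference descriptor `pref`, of composing an implementation mask and applying it through CUBLASLT_MATMUL_PREF_IMPL_MASK (defined later in this header); the helper name is hypothetical.

#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: restrict heuristic results to
 * FP32-input, FP32-accumulator FMA (non tensor-op) kernels. */
static cublasStatus_t restrict_to_fp32_fma(cublasLtMatmulPreference_t pref) {
  cublasLtNumericalImplFlags_t implMask = CUBLASLT_NUMERICAL_IMPL_FLAGS_FMA |
                                          CUBLASLT_NUMERICAL_IMPL_FLAGS_ACCUMULATOR_32F |
                                          CUBLASLT_NUMERICAL_IMPL_FLAGS_INPUT_32F;
  return cublasLtMatmulPreferenceSetAttribute(pref, CUBLASLT_MATMUL_PREF_IMPL_MASK,
                                              &implMask, sizeof(implMask));
}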
369
+ /** Execute matrix multiplication (D = alpha * op(A) * op(B) + beta * C).
370
+ *
371
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
372
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
373
+ * when workspaceSizeInBytes is less than workspace required by configured
374
+ * algo
375
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
376
+ * operation
377
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
378
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
379
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
380
+ */
381
+ cublasStatus_t CUBLASWINAPI cublasLtMatmul(cublasLtHandle_t lightHandle,
382
+ cublasLtMatmulDesc_t computeDesc,
383
+ const void* alpha, /* host or device pointer */
384
+ const void* A,
385
+ cublasLtMatrixLayout_t Adesc,
386
+ const void* B,
387
+ cublasLtMatrixLayout_t Bdesc,
388
+ const void* beta, /* host or device pointer */
389
+ const void* C,
390
+ cublasLtMatrixLayout_t Cdesc,
391
+ void* D,
392
+ cublasLtMatrixLayout_t Ddesc,
393
+ const cublasLtMatmulAlgo_t* algo,
394
+ void* workspace,
395
+ size_t workspaceSizeInBytes,
396
+ cudaStream_t stream);
397
+
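A minimal end-to-end sketch of this call, assuming column-major single-precision device buffers dA, dB, dC, dD and a caller-provided workspace; the function name and parameter choices are illustrative and error handling is omitted. It uses the descriptor and heuristic helpers declared further down in this header.

#include <cublasLt.h>
#include <cuda_runtime.h>

/* Illustrative sketch, not part of cublasLt.h: D = alpha*A*B + beta*C in FP32. */
void lt_sgemm(cublasLtHandle_t ltHandle,
              int m, int n, int k,
              const float* alpha, const float* dA, const float* dB,
              const float* beta, const float* dC, float* dD,
              void* dWorkspace, size_t workspaceSize, cudaStream_t stream) {
  cublasLtMatmulDesc_t opDesc = NULL;
  cublasLtMatrixLayout_t aDesc = NULL, bDesc = NULL, cDesc = NULL, dDesc = NULL;
  cublasLtMatmulPreference_t pref = NULL;
  cublasLtMatmulHeuristicResult_t heur;
  int returned = 0;

  cublasLtMatmulDescCreate(&opDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F);
  cublasLtMatrixLayoutCreate(&aDesc, CUDA_R_32F, m, k, m);   /* lda = m */
  cublasLtMatrixLayoutCreate(&bDesc, CUDA_R_32F, k, n, k);   /* ldb = k */
  cublasLtMatrixLayoutCreate(&cDesc, CUDA_R_32F, m, n, m);   /* ldc = m */
  cublasLtMatrixLayoutCreate(&dDesc, CUDA_R_32F, m, n, m);   /* ldd = m */

  cublasLtMatmulPreferenceCreate(&pref);
  cublasLtMatmulPreferenceSetAttribute(pref, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
                                       &workspaceSize, sizeof(workspaceSize));

  /* Ask the heuristic for the best algorithm for this problem. */
  cublasLtMatmulAlgoGetHeuristic(ltHandle, opDesc, aDesc, bDesc, cDesc, dDesc,
                                 pref, 1, &heur, &returned);

  cublasLtMatmul(ltHandle, opDesc, alpha, dA, aDesc, dB, bDesc,
                 beta, dC, cDesc, dD, dDesc,
                 returned > 0 ? &heur.algo : NULL,
                 dWorkspace, workspaceSize, stream);

  cublasLtMatmulPreferenceDestroy(pref);
  cublasLtMatrixLayoutDestroy(dDesc);
  cublasLtMatrixLayoutDestroy(cDesc);
  cublasLtMatrixLayoutDestroy(bDesc);
  cublasLtMatrixLayoutDestroy(aDesc);
  cublasLtMatmulDescDestroy(opDesc);
}

The call is asynchronous with respect to the host: results in dD are only valid after the stream has been synchronized.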
398
+ /** Matrix layout conversion helper (C = alpha * op(A) + beta * op(B))
399
+ *
400
+ * Can be used to change memory order of data or to scale and shift the values.
401
+ *
402
+ * \retval CUBLAS_STATUS_NOT_INITIALIZED if cuBLASLt handle has not been initialized
403
+ * \retval CUBLAS_STATUS_INVALID_VALUE if parameters are in conflict or in an impossible configuration; e.g.
404
+ * when A is not NULL, but Adesc is NULL
405
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if current implementation on selected device doesn't support configured
406
+ * operation
407
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if configured operation cannot be run using selected device
408
+ * \retval CUBLAS_STATUS_EXECUTION_FAILED if cuda reported execution error from the device
409
+ * \retval CUBLAS_STATUS_SUCCESS if the operation completed successfully
410
+ */
411
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransform(cublasLtHandle_t lightHandle,
412
+ cublasLtMatrixTransformDesc_t transformDesc,
413
+ const void* alpha, /* host or device pointer */
414
+ const void* A,
415
+ cublasLtMatrixLayout_t Adesc,
416
+ const void* beta, /* host or device pointer */
417
+ const void* B,
418
+ cublasLtMatrixLayout_t Bdesc,
419
+ void* C,
420
+ cublasLtMatrixLayout_t Cdesc,
421
+ cudaStream_t stream);
422
+
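As a small illustrative sketch (not taken from the header), the conversion below copies a row-major m x n float matrix into a column-major buffer by computing C = 1*A + 0*B with B omitted; the helper name and variables are hypothetical and error checks are skipped.

#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: row-major to column-major copy. */
void lt_row_to_col(cublasLtHandle_t ltHandle, int m, int n,
                   const float* dSrc, float* dDst, cudaStream_t stream) {
  cublasLtMatrixTransformDesc_t tDesc = NULL;
  cublasLtMatrixLayout_t srcDesc = NULL, dstDesc = NULL;
  cublasLtOrder_t rowOrder = CUBLASLT_ORDER_ROW;
  float alpha = 1.0f, beta = 0.0f;

  cublasLtMatrixTransformDescCreate(&tDesc, CUDA_R_32F); /* scale type */

  /* Source: row-major, leading dimension = number of columns. */
  cublasLtMatrixLayoutCreate(&srcDesc, CUDA_R_32F, m, n, n);
  cublasLtMatrixLayoutSetAttribute(srcDesc, CUBLASLT_MATRIX_LAYOUT_ORDER,
                                   &rowOrder, sizeof(rowOrder));

  /* Destination: default column-major, leading dimension = number of rows. */
  cublasLtMatrixLayoutCreate(&dstDesc, CUDA_R_32F, m, n, m);

  cublasLtMatrixTransform(ltHandle, tDesc, &alpha, dSrc, srcDesc,
                          &beta, NULL, NULL, dDst, dstDesc, stream);

  cublasLtMatrixLayoutDestroy(dstDesc);
  cublasLtMatrixLayoutDestroy(srcDesc);
  cublasLtMatrixTransformDescDestroy(tDesc);
}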
423
+ /* ---------------------------------------------------------------------------------------*/
424
+ /* Helper functions for cublasLtMatrixLayout_t */
425
+ /* ---------------------------------------------------------------------------------------*/
426
+
427
+ /** Enum for data ordering */
428
+ typedef enum {
429
+ /** Column-major
430
+ *
431
+ * Leading dimension is the stride (in elements) to the beginning of next column in memory.
432
+ */
433
+ CUBLASLT_ORDER_COL = 0,
434
+ /** Row major
435
+ *
436
+ * Leading dimension is the stride (in elements) to the beginning of next row in memory.
437
+ */
438
+ CUBLASLT_ORDER_ROW = 1,
439
+ /** Column-major ordered tiles of 32 columns.
440
+ *
441
+ * Leading dimension is the stride (in elements) to the beginning of next group of 32-columns. E.g. if matrix has 33
442
+ * columns and 2 rows, ld must be at least (32) * 2 = 64.
443
+ */
444
+ CUBLASLT_ORDER_COL32 = 2,
445
+ /** Column-major ordered tiles of composite tiles with total 32 columns and 8 rows, tile composed of interleaved
446
+ * inner tiles of 4 columns within 4 even or odd rows in an alternating pattern.
447
+ *
448
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 8 row tile for the next
449
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32 * 8) * 1 = 256.
450
+ */
451
+ CUBLASLT_ORDER_COL4_4R2_8C = 3,
452
+ /** Column-major ordered tiles of composite tiles with total 32 columns and 32 rows.
453
+ * Element offset within the tile is calculated as (((row%8)/2*4+row/8)*2+row%2)*32+col.
454
+ *
455
+ * Leading dimension is the stride (in elements) to the beginning of the first 32 column x 32 row tile for the next
456
+ * 32-wide group of columns. E.g. if matrix has 33 columns and 1 row, ld must be at least (32*32)*1 = 1024.
457
+ */
458
+ CUBLASLT_ORDER_COL32_2R_4R4 = 4,
459
+
460
+ } cublasLtOrder_t;
461
+
462
+ /** Attributes of memory layout */
463
+ typedef enum {
464
+ /** Data type, see cudaDataType.
465
+ *
466
+ * uint32_t
467
+ */
468
+ CUBLASLT_MATRIX_LAYOUT_TYPE = 0,
469
+
470
+ /** Memory order of the data, see cublasLtOrder_t.
471
+ *
472
+ * int32_t, default: CUBLASLT_ORDER_COL
473
+ */
474
+ CUBLASLT_MATRIX_LAYOUT_ORDER = 1,
475
+
476
+ /** Number of rows.
477
+ *
478
+ * Usually only values that can be expressed as int32_t are supported.
479
+ *
480
+ * uint64_t
481
+ */
482
+ CUBLASLT_MATRIX_LAYOUT_ROWS = 2,
483
+
484
+ /** Number of columns.
485
+ *
486
+ * Usually only values that can be expressed as int32_t are supported.
487
+ *
488
+ * uint64_t
489
+ */
490
+ CUBLASLT_MATRIX_LAYOUT_COLS = 3,
491
+
492
+ /** Matrix leading dimension.
493
+ *
494
+ * For CUBLASLT_ORDER_COL this is the stride (in elements) of a matrix column; for details on the
495
+ * other memory orders see the documentation for cublasLtOrder_t values.
496
+ *
497
+ * Currently only non-negative values are supported, must be large enough so that matrix memory locations are not
498
+ * overlapping (e.g. greater or equal to CUBLASLT_MATRIX_LAYOUT_ROWS in case of CUBLASLT_ORDER_COL).
499
+ *
500
+ * int64_t;
501
+ */
502
+ CUBLASLT_MATRIX_LAYOUT_LD = 4,
503
+
504
+ /** Number of matmul operations to perform in the batch.
505
+ *
506
+ * See also CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT
507
+ *
508
+ * int32_t, default: 1
509
+ */
510
+ CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT = 5,
511
+
512
+ /** Stride (in elements) to the next matrix for strided batch operation.
513
+ *
514
+ * When matrix type is planar-complex (CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET != 0), batch stride
515
+ * is interpreted by cublasLtMatmul() in number of real valued sub-elements. E.g. for data of type CUDA_C_16F,
516
+ * offset of 1024B is encoded as a stride of value 512 (since each element of the real and imaginary matrices
517
+ * is a 2B (16bit) floating point type).
518
+ *
519
+ * NOTE: A bug in cublasLtMatrixTransform() causes it to interpret the batch stride for a planar-complex matrix
520
+ * as if it was specified in number of complex elements. Therefore an offset of 1024B must be encoded as stride
521
+ * value 256 when calling cublasLtMatrixTransform() (each complex element is 4B with real and imaginary values 2B
522
+ * each). This behavior is expected to be corrected in the next major cuBLAS version.
523
+ *
524
+ * int64_t, default: 0
525
+ */
526
+ CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET = 6,
527
+
528
+ /** Stride (in bytes) to the imaginary plane for planar complex layout.
529
+ *
530
+ * int64_t, default: 0 - 0 means that layout is regular (real and imaginary parts of complex numbers are interleaved
531
+ * in memory in each element)
532
+ */
533
+ CUBLASLT_MATRIX_LAYOUT_PLANE_OFFSET = 7,
534
+ } cublasLtMatrixLayoutAttribute_t;
535
+
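A short sketch, assuming FP16 data and hypothetical dimensions, of how these attributes combine to describe a strided batch: a column-major 128 x 64 layout for 8 matrices stored back to back in one buffer.

#include <stdint.h>
#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: batched layout description. */
static cublasLtMatrixLayout_t make_batched_layout(void) {
  cublasLtMatrixLayout_t layout = NULL;
  int32_t batchCount = 8;
  int64_t strideElems = (int64_t)128 * 64; /* elements between consecutive matrices */

  cublasLtMatrixLayoutCreate(&layout, CUDA_R_16F, 128, 64, 128);
  cublasLtMatrixLayoutSetAttribute(layout, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT,
                                   &batchCount, sizeof(batchCount));
  cublasLtMatrixLayoutSetAttribute(layout, CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET,
                                   &strideElems, sizeof(strideElems));
  return layout;
}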
536
+ /** Internal. Do not use directly.
537
+ */
538
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutInit_internal( //
539
+ cublasLtMatrixLayout_t matLayout,
540
+ size_t size,
541
+ cudaDataType type,
542
+ uint64_t rows,
543
+ uint64_t cols,
544
+ int64_t ld);
545
+
546
+ /** Initialize matrix layout descriptor in pre-allocated space.
547
+ *
548
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
549
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
550
+ */
551
+ static inline cublasStatus_t cublasLtMatrixLayoutInit(
552
+ cublasLtMatrixLayout_t matLayout, cudaDataType type, uint64_t rows, uint64_t cols, int64_t ld) {
553
+ return cublasLtMatrixLayoutInit_internal(matLayout, sizeof(*matLayout), type, rows, cols, ld);
554
+ }
555
+
556
+ /** Create new matrix layout descriptor.
557
+ *
558
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
559
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
560
+ */
561
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutCreate( //
562
+ cublasLtMatrixLayout_t* matLayout,
563
+ cudaDataType type,
564
+ uint64_t rows,
565
+ uint64_t cols,
566
+ int64_t ld);
567
+
568
+ /** Destroy matrix layout descriptor.
569
+ *
570
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
571
+ */
572
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutDestroy(cublasLtMatrixLayout_t matLayout);
573
+
574
+ /** Set matrix layout descriptor attribute.
575
+ *
576
+ * \param[in] matLayout The descriptor
577
+ * \param[in] attr The attribute
578
+ * \param[in] buf memory address containing the new value
579
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
580
+ *
581
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
582
+ * selected attribute
583
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
584
+ */
585
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutSetAttribute( //
586
+ cublasLtMatrixLayout_t matLayout,
587
+ cublasLtMatrixLayoutAttribute_t attr,
588
+ const void* buf,
589
+ size_t sizeInBytes);
590
+
591
+ /** Get matrix layout descriptor attribute.
592
+ *
593
+ * \param[in] matLayout The descriptor
594
+ * \param[in] attr The attribute
595
+ * \param[out] buf memory address to which the attribute value will be written
596
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
597
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
598
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
599
+ *
600
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
601
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
602
+ * selected attribute
603
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
604
+ */
605
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixLayoutGetAttribute( //
606
+ cublasLtMatrixLayout_t matLayout,
607
+ cublasLtMatrixLayoutAttribute_t attr,
608
+ void* buf,
609
+ size_t sizeInBytes,
610
+ size_t* sizeWritten);
611
+
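For illustration only (the helper name is hypothetical), a get-attribute call of known size looks like the sketch below; passing sizeInBytes = 0 instead would report the required buffer size through the last argument.

#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: read back the memory order of a layout. */
static int32_t query_layout_order(cublasLtMatrixLayout_t layout) {
  int32_t order = CUBLASLT_ORDER_COL;
  size_t written = 0;
  cublasLtMatrixLayoutGetAttribute(layout, CUBLASLT_MATRIX_LAYOUT_ORDER,
                                   &order, sizeof(order), &written);
  return order; /* a cublasLtOrder_t value */
}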
612
+ /* ---------------------------------------------------------------------------------------*/
613
+ /* Helper functions for cublasLtMatmulDesc_t */
614
+ /* ---------------------------------------------------------------------------------------*/
615
+
616
+ /** Matmul descriptor attributes to define details of the operation. */
617
+ typedef enum {
618
+ /** Compute type, see cudaDataType. Defines data type used for multiply and accumulate operations and the
619
+ * accumulator during matrix multiplication.
620
+ *
621
+ * int32_t
622
+ */
623
+ CUBLASLT_MATMUL_DESC_COMPUTE_TYPE = 0,
624
+
625
+ /** Scale type, see cudaDataType. Defines data type of alpha and beta. Accumulator and value from matrix C are
626
+ * typically converted to scale type before final scaling. Value is then converted from scale type to type of matrix
627
+ * D before being stored in memory.
628
+ *
629
+ * int32_t, default: same as CUBLASLT_MATMUL_DESC_COMPUTE_TYPE
630
+ */
631
+ CUBLASLT_MATMUL_DESC_SCALE_TYPE = 1,
632
+
633
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t. When CUBLASLT_POINTER_MODE_DEVICE_VECTOR is in use,
634
+ * alpha/beta vector lengths must match the number of output matrix rows.
635
+ *
636
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
637
+ */
638
+ CUBLASLT_MATMUL_DESC_POINTER_MODE = 2,
639
+
640
+ /** Transform of matrix A, see cublasOperation_t.
641
+ *
642
+ * int32_t, default: CUBLAS_OP_N
643
+ */
644
+ CUBLASLT_MATMUL_DESC_TRANSA = 3,
645
+
646
+ /** Transform of matrix B, see cublasOperation_t.
647
+ *
648
+ * int32_t, default: CUBLAS_OP_N
649
+ */
650
+ CUBLASLT_MATMUL_DESC_TRANSB = 4,
651
+
652
+ /** Transform of matrix C, see cublasOperation_t.
653
+ *
654
+ * Currently only CUBLAS_OP_N is supported.
655
+ *
656
+ * int32_t, default: CUBLAS_OP_N
657
+ */
658
+ CUBLASLT_MATMUL_DESC_TRANSC = 5,
659
+
660
+ /** Matrix fill mode, see cublasFillMode_t.
661
+ *
662
+ * int32_t, default: CUBLAS_FILL_MODE_FULL
663
+ */
664
+ CUBLASLT_MATMUL_DESC_FILL_MODE = 6,
665
+
666
+ /** Epilogue function, see cublasLtEpilogue_t.
667
+ *
668
+ * uint32_t, default: CUBLASLT_EPILOGUE_DEFAULT
669
+ */
670
+ CUBLASLT_MATMUL_DESC_EPILOGUE = 7,
671
+
672
+ /** Bias or bias gradient vector pointer in the device memory.
673
+ *
674
+ * Bias case. See CUBLASLT_EPILOGUE_BIAS.
675
+ * For bias data type see CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE.
676
+ *
677
+ * Bias vector length must match matrix D rows count.
678
+ *
679
+ * Bias gradient case. See CUBLASLT_EPILOGUE_DRELU_BGRAD and CUBLASLT_EPILOGUE_DGELU_BGRAD.
680
+ * Bias gradient vector elements are the same type as the output elements
681
+ * (Ctype) with the exception of IMMA kernels (see above).
682
+ *
683
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
684
+ * depend on its value to determine expected pointer alignment.
685
+ *
686
+ * Bias case: const void *, default: NULL
687
+ * Bias gradient case: void *, default: NULL
688
+ */
689
+ CUBLASLT_MATMUL_DESC_BIAS_POINTER = 8,
690
+
691
+ /** Batch stride for bias or bias gradient vector.
692
+ *
693
+ * Used together with CUBLASLT_MATMUL_DESC_BIAS_POINTER when matrix D's CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1.
694
+ *
695
+ * int64_t, default: 0
696
+ */
697
+ CUBLASLT_MATMUL_DESC_BIAS_BATCH_STRIDE = 10,
698
+
699
+ /** Pointer for epilogue auxiliary buffer.
700
+ *
701
+ * - Output vector for ReLu bit-mask in forward pass when CUBLASLT_EPILOGUE_RELU_AUX
702
+ * or CUBLASLT_EPILOGUE_RELU_AUX_BIAS epilogue is used.
703
+ * - Input vector for ReLu bit-mask in backward pass when
704
+ * CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is used.
705
+ *
706
+ * - Output of GELU input matrix in forward pass when
707
+ * CUBLASLT_EPILOGUE_GELU_AUX_BIAS epilogue is used.
708
+ * - Input of GELU input matrix for backward pass when
709
+ * CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue is used.
710
+ *
711
+ * For aux data type see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE.
712
+ *
713
+ * Routines that don't dereference this pointer, like cublasLtMatmulAlgoGetHeuristic()
714
+ * depend on its value to determine expected pointer alignment.
715
+ *
716
+ * Requires setting CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD attribute.
717
+ *
718
+ * Forward pass: void *, default: NULL
719
+ * Backward pass: const void *, default: NULL
720
+ */
721
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER = 11,
722
+
723
+ /** Leading dimension for epilogue auxiliary buffer.
724
+ *
725
+ * - ReLu bit-mask matrix leading dimension in elements (i.e. bits)
726
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
727
+ * used. Must be divisible by 128 and be no less than the number of rows in the output matrix.
728
+ *
729
+ * - GELU input matrix leading dimension in elements
730
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
731
+ * Must be divisible by 8 and be no less than the number of rows in the output matrix.
732
+ *
733
+ * int64_t, default: 0
734
+ */
735
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD = 12,
736
+
737
+ /** Batch stride for epilogue auxiliary buffer.
738
+ *
739
+ * - ReLu bit-mask matrix batch stride in elements (i.e. bits)
740
+ * when CUBLASLT_EPILOGUE_RELU_AUX, CUBLASLT_EPILOGUE_RELU_AUX_BIAS or CUBLASLT_EPILOGUE_DRELU_BGRAD epilogue is
741
+ * used. Must be divisible by 128.
742
+ *
743
+ * - GELU input matrix batch stride in elements
744
+ * when CUBLASLT_EPILOGUE_GELU_AUX_BIAS or CUBLASLT_EPILOGUE_DGELU_BGRAD epilogue used.
745
+ * Must be divisible by 8.
746
+ *
747
+ * int64_t, default: 0
748
+ */
749
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_BATCH_STRIDE = 13,
750
+
751
+ /** Batch stride for alpha vector.
752
+ *
753
+ * Used together with CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_HOST when matrix D's
754
+ * CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT > 1. If CUBLASLT_POINTER_MODE_ALPHA_DEVICE_VECTOR_BETA_ZERO is set then
755
+ * CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE must be set to 0 because this mode does not support a batched alpha vector.
756
+ *
757
+ * int64_t, default: 0
758
+ */
759
+ CUBLASLT_MATMUL_DESC_ALPHA_VECTOR_BATCH_STRIDE = 14,
760
+
761
+ /** Number of SMs to target for parallel execution. Optimizes heuristics for execution on a different number of SMs
762
+ * when user expects a concurrent stream to be using some of the device resources.
763
+ *
764
+ * int32_t, default: 0 - use the number reported by the device.
765
+ */
766
+ CUBLASLT_MATMUL_DESC_SM_COUNT_TARGET = 15,
767
+
768
+ /** Device pointer to the scale factor value that converts data in matrix A to the compute data type range.
769
+ *
770
+ * The scaling factor value must have the same type as the compute type.
771
+ *
772
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
773
+ *
774
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
775
+ * will return CUBLAS_STATUS_INVALID_VALUE.
776
+ *
777
+ * const void *, default: NULL
778
+ */
779
+ CUBLASLT_MATMUL_DESC_A_SCALE_POINTER = 17,
780
+
781
+ /** Device pointer to the scale factor value to convert data in matrix B to compute data type range.
782
+ *
783
+ * The scaling factor value must have the same type as the compute type.
784
+ *
785
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
786
+ *
787
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
788
+ * will return CUBLAS_INVALID_VALUE.
789
+ *
790
+ * const void *, default: NULL
791
+ */
792
+ CUBLASLT_MATMUL_DESC_B_SCALE_POINTER = 18,
793
+
794
+ /** Device pointer to the scale factor value to convert data in matrix C to compute data type range.
795
+ *
796
+ * The scaling factor value must have the same type as the compute type.
797
+ *
798
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
799
+ *
800
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
801
+ * will return CUBLAS_STATUS_INVALID_VALUE.
802
+ *
803
+ * const void *, default: NULL
804
+ */
805
+ CUBLASLT_MATMUL_DESC_C_SCALE_POINTER = 19,
806
+
807
+ /** Device pointer to the scale factor value to convert data in matrix D to compute data type range.
808
+ *
809
+ * The scaling factor value must have the same type as the compute type.
810
+ *
811
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1.
812
+ *
813
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
814
+ * will return CUBLAS_INVALID_VALUE.
815
+ *
816
+ * const void *, default: NULL
817
+ */
818
+ CUBLASLT_MATMUL_DESC_D_SCALE_POINTER = 20,
819
+
820
+ /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
821
+ * output matrix.
822
+ *
823
+ * The computed value has the same type as the compute type.
824
+ *
825
+ * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
826
+ * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
827
+ *
828
+ * void *, default: NULL
829
+ */
830
+ CUBLASLT_MATMUL_DESC_AMAX_D_POINTER = 21,
831
+
832
+ /** Type of the data to be stored to the memory pointed to by CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
833
+ *
834
+ * If unset, the data type defaults to the type of elements of the output matrix with some exceptions, see details
835
+ * below.
836
+ *
837
+ * ReLu uses a bit-mask.
838
+ *
839
+ * GELU input matrix elements type is the same as the type of elements of
840
+ * the output matrix with some exceptions, see details below.
841
+ *
842
+ * For fp8 kernels with output type CUDA_R_8F_E4M3 the aux data type can be CUDA_R_8F_E4M3 or CUDA_R_16F with some
843
+ * restrictions. See https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmulDescAttributes_t for more details.
844
+ *
845
+ * If set for an unsupported matrix data, scale, and compute type combination, calling cublasLtMatmul()
846
+ * will return CUBLAS_STATUS_INVALID_VALUE.
847
+ *
848
+ * int32_t based on cudaDataType, default: -1
849
+ */
850
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_DATA_TYPE = 22,
851
+
852
+ /** Device pointer to the scaling factor value to convert results from compute type data range to storage
853
+ * data range in the auxiliary matrix that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
854
+ *
855
+ * The scaling factor value must have the same type as the compute type.
856
+ *
857
+ * If not specified, or set to NULL, the scaling factor is assumed to be 1. If set for an unsupported matrix data,
858
+ * scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
859
+ *
860
+ * void *, default: NULL
861
+ */
862
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_SCALE_POINTER = 23,
863
+
864
+ /** Device pointer to the memory location that on completion will be set to the maximum of absolute values in the
865
+ * buffer that is set via CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
866
+ *
867
+ * The computed value has the same type as the compute type.
868
+ *
869
+ * If not specified or set to NULL, the maximum absolute value is not computed. If set for an unsupported matrix
870
+ * data, scale, and compute type combination, calling cublasLtMatmul() will return CUBLAS_STATUS_INVALID_VALUE.
871
+ *
872
+ * void *, default: NULL
873
+ */
874
+ CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_AMAX_POINTER = 24,
875
+
876
+ /** Flag for managing fp8 fast accumulation mode.
877
+ * When enabled, problem execution might be faster but at the cost of lower accuracy because intermediate results
878
+ * will not periodically be promoted to a higher precision.
879
+ *
880
+ * int8_t, default: 0 - fast accumulation mode is disabled.
881
+ */
882
+ CUBLASLT_MATMUL_DESC_FAST_ACCUM = 25,
883
+
884
+ /** Type of bias or bias gradient vector in the device memory.
885
+ *
886
+ * Bias case: see CUBLASLT_EPILOGUE_BIAS.
887
+ *
888
+ * Bias vector elements are the same type as the elements of output matrix (Dtype) with the following exceptions:
889
+ * - IMMA kernels with computeType=CUDA_R_32I and Ctype=CUDA_R_8I where the bias vector elements
890
+ * are the same type as alpha, beta (CUBLASLT_MATMUL_DESC_SCALE_TYPE=CUDA_R_32F)
891
+ * - fp8 kernels with an output type of CUDA_R_32F, CUDA_R_8F_E4M3 or CUDA_R_8F_E5M2, See
892
+ * https://docs.nvidia.com/cuda/cublas/index.html#cublasLtMatmul for details.
893
+ *
894
+ * int32_t based on cudaDataType, default: -1
895
+ */
896
+ CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE = 26,
897
+ } cublasLtMatmulDescAttributes_t;
898
+
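To show how several of these attributes are typically combined, here is a hedged sketch that builds a descriptor for D = alpha * A^T * B + beta * C with a per-row bias applied in the epilogue; the function name is hypothetical and `dBias` is assumed to point to a device vector with one element per row of D.

#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: transpose-A matmul with bias epilogue. */
static cublasStatus_t make_bias_matmul_desc(cublasLtMatmulDesc_t* outDesc, const float* dBias) {
  cublasOperation_t transA = CUBLAS_OP_T;
  cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_BIAS;
  cublasStatus_t status = cublasLtMatmulDescCreate(outDesc, CUBLAS_COMPUTE_32F, CUDA_R_32F);
  if (status != CUBLAS_STATUS_SUCCESS) return status;
  cublasLtMatmulDescSetAttribute(*outDesc, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA));
  cublasLtMatmulDescSetAttribute(*outDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogue, sizeof(epilogue));
  /* The attribute stores the pointer value itself, so pass the address of dBias. */
  return cublasLtMatmulDescSetAttribute(*outDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &dBias, sizeof(dBias));
}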
899
+ /** Internal. Do not use directly.
900
+ */
901
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescInit_internal( //
902
+ cublasLtMatmulDesc_t matmulDesc,
903
+ size_t size,
904
+ cublasComputeType_t computeType,
905
+ cudaDataType_t scaleType);
906
+
907
+ /** Initialize matmul operation descriptor in pre-allocated space.
908
+ *
909
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
910
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was initialized successfully
911
+ */
912
+ static inline cublasStatus_t cublasLtMatmulDescInit( //
913
+ cublasLtMatmulDesc_t matmulDesc,
914
+ cublasComputeType_t computeType,
915
+ cudaDataType_t scaleType) {
916
+ return cublasLtMatmulDescInit_internal(matmulDesc, sizeof(*matmulDesc), computeType, scaleType);
917
+ }
918
+
919
+ /** Create new matmul operation descriptor.
920
+ *
921
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
922
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
923
+ */
924
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescCreate(cublasLtMatmulDesc_t* matmulDesc,
925
+ cublasComputeType_t computeType,
926
+ cudaDataType_t scaleType);
927
+
928
+ /** Destroy matmul operation descriptor.
929
+ *
930
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
931
+ */
932
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescDestroy(cublasLtMatmulDesc_t matmulDesc);
933
+
934
+ /** Set matmul operation descriptor attribute.
935
+ *
936
+ * \param[in] matmulDesc The descriptor
937
+ * \param[in] attr The attribute
938
+ * \param[in] buf memory address containing the new value
939
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
940
+ *
941
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
942
+ * selected attribute
943
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
944
+ */
945
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescSetAttribute( //
946
+ cublasLtMatmulDesc_t matmulDesc,
947
+ cublasLtMatmulDescAttributes_t attr,
948
+ const void* buf,
949
+ size_t sizeInBytes);
950
+
951
+ /** Get matmul operation descriptor attribute.
952
+ *
953
+ * \param[in] matmulDesc The descriptor
954
+ * \param[in] attr The attribute
955
+ * \param[out] buf memory address to which the attribute value will be written
956
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
957
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
958
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
959
+ *
960
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
961
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
962
+ * selected attribute
963
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
964
+ */
965
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulDescGetAttribute( //
966
+ cublasLtMatmulDesc_t matmulDesc,
967
+ cublasLtMatmulDescAttributes_t attr,
968
+ void* buf,
969
+ size_t sizeInBytes,
970
+ size_t* sizeWritten);
971
+
972
+ /* ---------------------------------------------------------------------------------------*/
973
+ /* Helper functions for cublasLtMatrixTransformDesc_t */
974
+ /* ---------------------------------------------------------------------------------------*/
975
+
976
+ /** Matrix transform descriptor attributes to define details of the operation.
977
+ */
978
+ typedef enum {
979
+ /** Scale type, see cudaDataType. Inputs are converted to scale type for scaling and summation and results are then
980
+ * converted to output type to store in memory.
981
+ *
982
+ * int32_t
983
+ */
984
+ CUBLASLT_MATRIX_TRANSFORM_DESC_SCALE_TYPE,
985
+
986
+ /** Pointer mode of alpha and beta, see cublasLtPointerMode_t.
987
+ *
988
+ * int32_t, default: CUBLASLT_POINTER_MODE_HOST
989
+ */
990
+ CUBLASLT_MATRIX_TRANSFORM_DESC_POINTER_MODE,
991
+
992
+ /** Transform of matrix A, see cublasOperation_t.
993
+ *
994
+ * int32_t, default: CUBLAS_OP_N
995
+ */
996
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSA,
997
+
998
+ /** Transform of matrix B, see cublasOperation_t.
999
+ *
1000
+ * int32_t, default: CUBLAS_OP_N
1001
+ */
1002
+ CUBLASLT_MATRIX_TRANSFORM_DESC_TRANSB,
1003
+ } cublasLtMatrixTransformDescAttributes_t;
1004
+
1005
+ /** Internal. Do not use directly.
1006
+ */
1007
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescInit_internal(cublasLtMatrixTransformDesc_t transformDesc,
1008
+ size_t size,
1009
+ cudaDataType scaleType);
1010
+
1011
+ /** Initialize matrix transform operation descriptor in pre-allocated space.
1012
+ *
1013
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1014
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1015
+ */
1016
+ static inline cublasStatus_t cublasLtMatrixTransformDescInit(cublasLtMatrixTransformDesc_t transformDesc,
1017
+ cudaDataType scaleType) {
1018
+ return cublasLtMatrixTransformDescInit_internal(transformDesc, sizeof(*transformDesc), scaleType);
1019
+ }
1020
+
1021
+ /** Create new matrix transform operation descriptor.
1022
+ *
1023
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
1024
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1025
+ */
1026
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescCreate(cublasLtMatrixTransformDesc_t* transformDesc,
1027
+ cudaDataType scaleType);
1028
+
1029
+ /** Destroy matrix transform operation descriptor.
1030
+ *
1031
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1032
+ */
1033
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescDestroy(cublasLtMatrixTransformDesc_t transformDesc);
1034
+
1035
+ /** Set matrix transform operation descriptor attribute.
1036
+ *
1037
+ * \param[in] transformDesc The descriptor
1038
+ * \param[in] attr The attribute
1039
+ * \param[in] buf memory address containing the new value
1040
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1041
+ *
1042
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1043
+ * selected attribute
1044
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1045
+ */
1046
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescSetAttribute( //
1047
+ cublasLtMatrixTransformDesc_t transformDesc,
1048
+ cublasLtMatrixTransformDescAttributes_t attr,
1049
+ const void* buf,
1050
+ size_t sizeInBytes);
1051
+
1052
+ /** Get matrix transform operation descriptor attribute.
1053
+ *
1054
+ * \param[in] transformDesc The descriptor
1055
+ * \param[in] attr The attribute
1056
+ * \param[out] buf memory address to which the attribute value will be written
1057
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1058
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number
1059
+ * of bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1060
+ *
1061
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1062
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1063
+ * selected attribute
1064
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1065
+ */
1066
+ cublasStatus_t CUBLASWINAPI cublasLtMatrixTransformDescGetAttribute( //
1067
+ cublasLtMatrixTransformDesc_t transformDesc,
1068
+ cublasLtMatrixTransformDescAttributes_t attr,
1069
+ void* buf,
1070
+ size_t sizeInBytes,
1071
+ size_t* sizeWritten);
1072
+
1073
+ /** Reduction scheme for portions of the dot-product calculated in parallel (a. k. a. "split - K").
1074
+ */
1075
+ typedef enum {
1076
+ /** No reduction scheme, dot-product shall be performed in one sequence.
1077
+ */
1078
+ CUBLASLT_REDUCTION_SCHEME_NONE = 0,
1079
+
1080
+ /** Reduction is performed "in place" - using the output buffer (and output data type) and counters (in workspace) to
1081
+ * guarantee the sequentiality.
1082
+ */
1083
+ CUBLASLT_REDUCTION_SCHEME_INPLACE = 1,
1084
+
1085
+ /** Intermediate results are stored in compute type in the workspace and reduced in a separate step.
1086
+ */
1087
+ CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE = 2,
1088
+
1089
+ /** Intermediate results are stored in output type in the workspace and reduced in a separate step.
1090
+ */
1091
+ CUBLASLT_REDUCTION_SCHEME_OUTPUT_TYPE = 4,
1092
+
1093
+ CUBLASLT_REDUCTION_SCHEME_MASK = 0x7,
1094
+ } cublasLtReductionScheme_t;
1095
+
1096
+ /** Postprocessing options for the epilogue
1097
+ */
1098
+ typedef enum {
1099
+ /** No special postprocessing, just scale and quantize results if necessary.
1100
+ */
1101
+ CUBLASLT_EPILOGUE_DEFAULT = 1,
1102
+
1103
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
1104
+ */
1105
+ CUBLASLT_EPILOGUE_RELU = 2,
1106
+
1107
+ /** ReLu, apply ReLu point-wise transform to the results (x:=max(x, 0)).
1108
+ *
1109
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
1110
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1111
+ */
1112
+ CUBLASLT_EPILOGUE_RELU_AUX = (CUBLASLT_EPILOGUE_RELU | 128),
1113
+
1114
+ /** Bias, apply (broadcasted) Bias from bias vector. Bias vector length must match matrix D rows, it must be packed
1115
+ * (stride between vector elements is 1). Bias vector is broadcasted to all columns and added before applying final
1116
+ * postprocessing.
1117
+ */
1118
+ CUBLASLT_EPILOGUE_BIAS = 4,
1119
+
1120
+ /** ReLu and Bias, apply Bias and then ReLu transform
1121
+ */
1122
+ CUBLASLT_EPILOGUE_RELU_BIAS = (CUBLASLT_EPILOGUE_RELU | CUBLASLT_EPILOGUE_BIAS),
1123
+
1124
+ /** ReLu and Bias, apply Bias and then ReLu transform
1125
+ *
1126
+ * This epilogue mode produces an extra output, a ReLu bit-mask matrix,
1127
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1128
+ */
1129
+ CUBLASLT_EPILOGUE_RELU_AUX_BIAS = (CUBLASLT_EPILOGUE_RELU_AUX | CUBLASLT_EPILOGUE_BIAS),
1130
+
1131
+ /* ReLu gradient. Apply ReLu gradient to matmul output. Store ReLu gradient in the output matrix.
1132
+ *
1133
+ * This epilogue mode requires an extra input,
1134
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1135
+ */
1136
+ CUBLASLT_EPILOGUE_DRELU = 8 | 128,
1137
+
1138
+ /* ReLu and Bias gradients. Apply independently ReLu and Bias gradient to
1139
+ * matmul output. Store ReLu gradient in the output matrix, and Bias gradient
1140
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1141
+ *
1142
+ * This epilogue mode requires an extra input,
1143
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1144
+ */
1145
+ CUBLASLT_EPILOGUE_DRELU_BGRAD = CUBLASLT_EPILOGUE_DRELU | 16,
1146
+
1147
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
1148
+ */
1149
+ CUBLASLT_EPILOGUE_GELU = 32,
1150
+
1151
+ /** GELU, apply GELU point-wise transform to the results (x:=GELU(x)).
1152
+ *
1153
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
1154
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1155
+ */
1156
+ CUBLASLT_EPILOGUE_GELU_AUX = (CUBLASLT_EPILOGUE_GELU | 128),
1157
+
1158
+ /** GELU and Bias, apply Bias and then GELU transform
1159
+ */
1160
+ CUBLASLT_EPILOGUE_GELU_BIAS = (CUBLASLT_EPILOGUE_GELU | CUBLASLT_EPILOGUE_BIAS),
1161
+
1162
+ /** GELU and Bias, apply Bias and then GELU transform
1163
+ *
1164
+ * This epilogue mode outputs GELU input as a separate matrix (useful for training).
1165
+ * See CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1166
+ */
1167
+ CUBLASLT_EPILOGUE_GELU_AUX_BIAS = (CUBLASLT_EPILOGUE_GELU_AUX | CUBLASLT_EPILOGUE_BIAS),
1168
+
1169
+ /* GELU gradient. Apply GELU gradient to matmul output. Store GELU gradient in the output matrix.
1170
+ *
1171
+ * This epilogue mode requires an extra input,
1172
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1173
+ */
1174
+ CUBLASLT_EPILOGUE_DGELU = 64 | 128,
1175
+
1176
+ /* GELU and Bias gradients. Apply independently GELU and Bias gradient to
1177
+ * matmul output. Store GELU gradient in the output matrix, and Bias gradient
1178
+ * in the auxiliary output (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1179
+ *
1180
+ * This epilogue mode requires an extra input,
1181
+ * see CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER.
1182
+ */
1183
+ CUBLASLT_EPILOGUE_DGELU_BGRAD = CUBLASLT_EPILOGUE_DGELU | 16,
1184
+
1185
+ /** Bias gradient based on the input matrix A.
1186
+ *
1187
+ * The bias size corresponds to the number of rows of the matrix D.
1188
+ * The reduction happens over the GEMM's "k" dimension.
1189
+ *
1190
+ * Stores Bias gradient in the auxiliary output
1191
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1192
+ */
1193
+ CUBLASLT_EPILOGUE_BGRADA = 256,
1194
+
1195
+ /** Bias gradient based on the input matrix B.
1196
+ *
1197
+ * The bias size corresponds to the number of columns of the matrix D.
1198
+ * The reduction happens over the GEMM's "k" dimension.
1199
+ *
1200
+ * Stores Bias gradient in the auxiliary output
1201
+ * (see CUBLASLT_MATMUL_DESC_BIAS_POINTER).
1202
+ */
1203
+ CUBLASLT_EPILOGUE_BGRADB = 512,
1204
+ } cublasLtEpilogue_t;
1205
+
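As a hedged sketch of how an epilogue with an auxiliary output is wired up, the helper below enables CUBLASLT_EPILOGUE_GELU_AUX_BIAS; the name and parameters are hypothetical, `dGeluInput` and `dBias` are assumed to be device buffers, and `ldAux` must be at least the number of rows of D and divisible by 8 for this epilogue.

#include <stdint.h>
#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: GELU-with-bias epilogue that also
 * stores the GELU input matrix for a later backward pass. */
static void enable_gelu_aux_bias(cublasLtMatmulDesc_t matmulDesc,
                                 void* dGeluInput, const float* dBias, int64_t ldAux) {
  cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_GELU_AUX_BIAS;
  cublasLtMatmulDescSetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi));
  cublasLtMatmulDescSetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
                                 &dGeluInput, sizeof(dGeluInput));
  cublasLtMatmulDescSetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
                                 &ldAux, sizeof(ldAux));
  cublasLtMatmulDescSetAttribute(matmulDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER,
                                 &dBias, sizeof(dBias));
}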
1206
+ /** Matmul heuristic search mode
1207
+ */
1208
+ typedef enum {
1209
+ /** ask heuristics for the best algo for the given use case
1210
+ */
1211
+ CUBLASLT_SEARCH_BEST_FIT = 0,
1212
+ /** only try to find best config for preconfigured algo id
1213
+ */
1214
+ CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID = 1,
1215
+ /** reserved for future use
1216
+ */
1217
+ CUBLASLT_SEARCH_RESERVED_02 = 2,
1218
+ /** reserved for future use
1219
+ */
1220
+ CUBLASLT_SEARCH_RESERVED_03 = 3,
1221
+ /** reserved for future use
1222
+ */
1223
+ CUBLASLT_SEARCH_RESERVED_04 = 4,
1224
+ /** reserved for future use
1225
+ */
1226
+ CUBLASLT_SEARCH_RESERVED_05 = 5,
1227
+ } cublasLtMatmulSearch_t;
1228
+
1229
+ /** Algo search preference to fine tune the heuristic function. */
1230
+ typedef enum {
1231
+ /** Search mode, see cublasLtMatmulSearch_t.
1232
+ *
1233
+ * uint32_t, default: CUBLASLT_SEARCH_BEST_FIT
1234
+ */
1235
+ CUBLASLT_MATMUL_PREF_SEARCH_MODE = 0,
1236
+
1237
+ /** Maximum allowed workspace size in bytes.
1238
+ *
1239
+ * uint64_t, default: 0 - no workspace allowed
1240
+ */
1241
+ CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES = 1,
1242
+
1243
+ /** Reduction scheme mask, see cublasLtReductionScheme_t. Filters heuristic result to only include algo configs that
1244
+ * use one of the required modes.
1245
+ *
1246
+ * E.g. mask value of 0x03 will allow only INPLACE and COMPUTE_TYPE reduction schemes.
1247
+ *
1248
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_MASK (allows all reduction schemes)
1249
+ */
1250
+ CUBLASLT_MATMUL_PREF_REDUCTION_SCHEME_MASK = 3,
1251
+
1252
+ /** Minimum buffer alignment for matrix A (in bytes).
1253
+ *
1254
+ * Selecting a smaller value will exclude algorithms that cannot work with a matrix A that is less strictly aligned
1255
+ * than they require.
1256
+ *
1257
+ * uint32_t, default: 256
1258
+ */
1259
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES = 5,
1260
+
1261
+ /** Minimum buffer alignment for matrix B (in bytes).
1262
+ *
1263
+ * Selecting a smaller value will exclude algorithms that cannot work with a matrix B that is less strictly aligned
1264
+ * than they require.
1265
+ *
1266
+ * uint32_t, default: 256
1267
+ */
1268
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES = 6,
1269
+
1270
+ /** Minimum buffer alignment for matrix C (in bytes).
1271
+ *
1272
+ * Selecting a smaller value will exclude algorithms that cannot work with a matrix C that is less strictly aligned
1273
+ * than they require.
1274
+ *
1275
+ * uint32_t, default: 256
1276
+ */
1277
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES = 7,
1278
+
1279
+ /** Minimum buffer alignment for matrix D (in bytes).
1280
+ *
1281
+ * Selecting a smaller value will exclude algorithms that cannot work with a matrix D that is less strictly aligned
1282
+ * than they require.
1283
+ *
1284
+ * uint32_t, default: 256
1285
+ */
1286
+ CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_D_BYTES = 8,
1287
+
1288
+ /** Maximum wave count.
1289
+ *
1290
+ * See cublasLtMatmulHeuristicResult_t::wavesCount.
1291
+ *
1292
+ * Selecting a non-zero value will exclude algorithms that report device utilization higher than specified.
1293
+ *
1294
+ * float, default: 0.0f
1295
+ */
1296
+ CUBLASLT_MATMUL_PREF_MAX_WAVES_COUNT = 9,
1297
+
1298
+ /** Numerical implementation details mask, see cublasLtNumericalImplFlags_t. Filters heuristic result to only include
1299
+ * algorithms that use the allowed implementations.
1300
+ *
1301
+ * uint64_t, default: uint64_t(-1) (allow everything)
1302
+ */
1303
+ CUBLASLT_MATMUL_PREF_IMPL_MASK = 12,
1304
+ } cublasLtMatmulPreferenceAttributes_t;
1305
+
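A small sketch, with a hypothetical helper name and illustrative values, of how these preferences are typically set: allow a 32 MiB workspace and tell the heuristic that matrix A is only guaranteed to be 16-byte aligned, so only algorithms tolerating that alignment are returned.

#include <stdint.h>
#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: workspace and alignment preferences. */
static cublasStatus_t make_preference(cublasLtMatmulPreference_t* outPref) {
  uint64_t workspaceBytes = 32ull * 1024 * 1024;
  uint32_t alignA = 16;
  cublasStatus_t status = cublasLtMatmulPreferenceCreate(outPref);
  if (status != CUBLAS_STATUS_SUCCESS) return status;
  cublasLtMatmulPreferenceSetAttribute(*outPref, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
                                       &workspaceBytes, sizeof(workspaceBytes));
  return cublasLtMatmulPreferenceSetAttribute(*outPref, CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES,
                                              &alignA, sizeof(alignA));
}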
1306
+ /** Internal. Do not use directly.
1307
+ */
1308
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceInit_internal(cublasLtMatmulPreference_t pref, size_t size);
1309
+
1310
+ /** Initialize matmul heuristic search preference descriptor in pre-allocated space.
1311
+ *
1312
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if size of the pre-allocated space is insufficient
1313
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1314
+ */
1315
+ static inline cublasStatus_t cublasLtMatmulPreferenceInit(cublasLtMatmulPreference_t pref) {
1316
+ return cublasLtMatmulPreferenceInit_internal(pref, sizeof(*pref));
1317
+ }
1318
+
1319
+ /** Create new matmul heuristic search preference descriptor.
1320
+ *
1321
+ * \retval CUBLAS_STATUS_ALLOC_FAILED if memory could not be allocated
1322
+ * \retval CUBLAS_STATUS_SUCCESS if descriptor was created successfully
1323
+ */
1324
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceCreate(cublasLtMatmulPreference_t* pref);
1325
+
1326
+ /** Destroy matmul heuristic search preference descriptor.
1327
+ *
1328
+ * \retval CUBLAS_STATUS_SUCCESS if operation was successful
1329
+ */
1330
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceDestroy(cublasLtMatmulPreference_t pref);
1331
+
1332
+ /** Set matmul heuristic search preference descriptor attribute.
1333
+ *
1334
+ * \param[in] pref The descriptor
1335
+ * \param[in] attr The attribute
1336
+ * \param[in] buf memory address containing the new value
1337
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1338
+ *
1339
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1340
+ * selected attribute
1341
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1342
+ */
1343
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceSetAttribute( //
1344
+ cublasLtMatmulPreference_t pref,
1345
+ cublasLtMatmulPreferenceAttributes_t attr,
1346
+ const void* buf,
1347
+ size_t sizeInBytes);
1348
+
1349
+ /** Get matmul heuristic search preference descriptor attribute.
1350
+ *
1351
+ * \param[in] pref The descriptor
1352
+ * \param[in] attr The attribute
1353
+ * \param[out] buf memory address to which the attribute value will be written
1354
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1355
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1356
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1357
+ *
1358
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1359
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1360
+ * selected attribute
1361
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1362
+ */
1363
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulPreferenceGetAttribute( //
1364
+ cublasLtMatmulPreference_t pref,
1365
+ cublasLtMatmulPreferenceAttributes_t attr,
1366
+ void* buf,
1367
+ size_t sizeInBytes,
1368
+ size_t* sizeWritten);
1369
+
1370
+ /** Results structure used by cublasLtMatmulAlgoGetHeuristic().
1371
+ *
1372
+ * Holds returned configured algo descriptor and its runtime properties.
1373
+ */
1374
+ typedef struct {
1375
+ /** Matmul algorithm descriptor.
1376
+ *
1377
+ * Must be initialized with cublasLtMatmulAlgoInit() if the preference's CUBLASLT_MATMUL_PREF_SEARCH_MODE is set to
1378
+ * CUBLASLT_SEARCH_LIMITED_BY_ALGO_ID
1379
+ */
1380
+ cublasLtMatmulAlgo_t algo;
1381
+
1382
+ /** Actual size of workspace memory required.
1383
+ */
1384
+ size_t workspaceSize;
1385
+
1386
+ /** Result status; other fields are only valid if, after the call to cublasLtMatmulAlgoGetHeuristic(), this member is set to
1387
+ * CUBLAS_STATUS_SUCCESS.
1388
+ */
1389
+ cublasStatus_t state;
1390
+
1391
+ /** Waves count - a device utilization metric.
1392
+ *
1393
+ * A wavesCount value of 1.0f suggests that, when launched, the kernel will fully occupy the GPU.
1394
+ */
1395
+ float wavesCount;
1396
+
1397
+ int reserved[4];
1398
+ } cublasLtMatmulHeuristicResult_t;
1399
+
1400
+ /** Query cublasLt heuristic for algorithm appropriate for given use case.
1401
+ *
1402
+ * \param[in] lightHandle Pointer to the allocated cuBLASLt handle for the cuBLASLt
1403
+ * context. See cublasLtHandle_t.
1404
+ * \param[in] operationDesc Handle to the matrix multiplication descriptor.
1405
+ * \param[in] Adesc Handle to the layout descriptors for matrix A.
1406
+ * \param[in] Bdesc Handle to the layout descriptors for matrix B.
1407
+ * \param[in] Cdesc Handle to the layout descriptors for matrix C.
1408
+ * \param[in] Ddesc Handle to the layout descriptors for matrix D.
1409
+ * \param[in] preference Pointer to the structure holding the heuristic search
1410
+ * preferences descriptor. See cublasLtMatmulPreference_t.
1411
+ * \param[in] requestedAlgoCount Size of heuristicResultsArray (in elements) and requested
1412
+ * maximum number of algorithms to return.
1413
+ * \param[in, out] heuristicResultsArray Output algorithms and associated runtime characteristics,
1414
+ * ordered in increasing estimated compute time.
1415
+ * \param[out] returnAlgoCount The number of heuristicResultsArray elements written.
1416
+ *
1417
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
1418
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if no heuristic function available for current configuration
1419
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect
1420
+ * heuristicResultsArray[0 to (returnAlgoCount - 1)].state
1421
+ * for detail status of results
1422
+ */
1423
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetHeuristic(cublasLtHandle_t lightHandle,
1424
+ cublasLtMatmulDesc_t operationDesc,
1425
+ cublasLtMatrixLayout_t Adesc,
1426
+ cublasLtMatrixLayout_t Bdesc,
1427
+ cublasLtMatrixLayout_t Cdesc,
1428
+ cublasLtMatrixLayout_t Ddesc,
1429
+ cublasLtMatmulPreference_t preference,
1430
+ int requestedAlgoCount,
1431
+ cublasLtMatmulHeuristicResult_t heuristicResultsArray[],
1432
+ int* returnAlgoCount);
1433
+
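Since more than one result can be requested, a hedged sketch of the usual selection loop follows; the helper name is hypothetical and the handle, descriptors, and preference are assumed to be set up as in the earlier sketches.

#include <stddef.h>
#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: request several candidates and pick
 * the first whose per-result state is CUBLAS_STATUS_SUCCESS. */
static const cublasLtMatmulAlgo_t* pick_algo(cublasLtHandle_t ltHandle,
                                             cublasLtMatmulDesc_t opDesc,
                                             cublasLtMatrixLayout_t aDesc,
                                             cublasLtMatrixLayout_t bDesc,
                                             cublasLtMatrixLayout_t cDesc,
                                             cublasLtMatrixLayout_t dDesc,
                                             cublasLtMatmulPreference_t pref,
                                             cublasLtMatmulHeuristicResult_t results[8]) {
  int returned = 0;
  cublasLtMatmulAlgoGetHeuristic(ltHandle, opDesc, aDesc, bDesc, cDesc, dDesc,
                                 pref, 8, results, &returned);
  for (int i = 0; i < returned; ++i) {
    if (results[i].state == CUBLAS_STATUS_SUCCESS) {
      return &results[i].algo; /* results are ordered by estimated compute time */
    }
  }
  return NULL;
}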
1434
+ /* ---------------------------------------------------------------------------------------*/
1435
+ /* Lower level API to be able to implement own Heuristic and Find routines */
1436
+ /* ---------------------------------------------------------------------------------------*/
1437
+
1438
+ /** Routine to get all algo IDs that can potentially run
1439
+ *
1440
+ * \param[in] requestedAlgoCount requested number of algos (must be less than or equal to the size of algoIdsArray, in elements)
1441
+ * \param[out] algoIdsArray array to write algo IDs to
1442
+ * \param[out] returnAlgoCount number of algo IDs actually written
1443
+ *
1444
+ * \retval CUBLAS_STATUS_INVALID_VALUE if requestedAlgoCount is less or equal to zero
1445
+ * \retval CUBLAS_STATUS_SUCCESS if query was successful, inspect returnAlgoCount to get actual number of IDs
1446
+ * available
1447
+ */
1448
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoGetIds(cublasLtHandle_t lightHandle,
1449
+ cublasComputeType_t computeType,
1450
+ cudaDataType_t scaleType,
1451
+ cudaDataType_t Atype,
1452
+ cudaDataType_t Btype,
1453
+ cudaDataType_t Ctype,
1454
+ cudaDataType_t Dtype,
1455
+ int requestedAlgoCount,
1456
+ int algoIdsArray[],
1457
+ int* returnAlgoCount);
1458
+
1459
+ /** Initialize algo structure
1460
+ *
1461
+ * \retval CUBLAS_STATUS_INVALID_VALUE if algo is NULL or algoId is outside of recognized range
1462
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algoId is not supported for given combination of data types
1463
+ * \retval CUBLAS_STATUS_SUCCESS if the structure was successfully initialized
1464
+ */
1465
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoInit(cublasLtHandle_t lightHandle,
1466
+ cublasComputeType_t computeType,
1467
+ cudaDataType_t scaleType,
1468
+ cudaDataType_t Atype,
1469
+ cudaDataType_t Btype,
1470
+ cudaDataType_t Ctype,
1471
+ cudaDataType_t Dtype,
1472
+ int algoId,
1473
+ cublasLtMatmulAlgo_t* algo);
1474
+
1475
+ /** Check configured algo descriptor for correctness and support on current device.
1476
+ *
1477
+ * Result includes required workspace size and calculated wave count.
1478
+ *
1479
+ * CUBLAS_STATUS_SUCCESS doesn't fully guarantee algo will run (will fail if e.g. buffers are not correctly aligned);
1480
+ * but if cublasLtMatmulAlgoCheck fails, the algo will not run.
1481
+ *
1482
+ * \param[in] algo algo configuration to check
1483
+ * \param[out] result result structure to report algo runtime characteristics; algo field is never updated
1484
+ *
1485
+ * \retval CUBLAS_STATUS_INVALID_VALUE if matrix layout descriptors or operation descriptor don't match algo
1486
+ * descriptor
1487
+ * \retval CUBLAS_STATUS_NOT_SUPPORTED if algo configuration or data type combination is not currently supported on
1488
+ * given device
1489
+ * \retval CUBLAS_STATUS_ARCH_MISMATCH if algo configuration cannot be run using the selected device
1490
+ * \retval CUBLAS_STATUS_SUCCESS if check was successful
1491
+ */
1492
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCheck( //
1493
+ cublasLtHandle_t lightHandle,
1494
+ cublasLtMatmulDesc_t operationDesc,
1495
+ cublasLtMatrixLayout_t Adesc,
1496
+ cublasLtMatrixLayout_t Bdesc,
1497
+ cublasLtMatrixLayout_t Cdesc,
1498
+ cublasLtMatrixLayout_t Ddesc,
1499
+ const cublasLtMatmulAlgo_t* algo, ///< may point to result->algo
1500
+ cublasLtMatmulHeuristicResult_t* result);
1501
+
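A hedged sketch of the lower-level flow these three routines enable: enumerate algorithm IDs for an all-FP32 GEMM, initialize the first ID, and validate it against concrete problem descriptors. The function name is hypothetical, the descriptors are assumed to exist, and algo configuration attributes (tile, split-K, etc.) could additionally be applied before the check.

#include <cublasLt.h>

/* Illustrative sketch, not part of cublasLt.h: ID enumeration, init, and check. */
static cublasStatus_t check_first_algo(cublasLtHandle_t ltHandle,
                                       cublasLtMatmulDesc_t opDesc,
                                       cublasLtMatrixLayout_t aDesc,
                                       cublasLtMatrixLayout_t bDesc,
                                       cublasLtMatrixLayout_t cDesc,
                                       cublasLtMatrixLayout_t dDesc,
                                       cublasLtMatmulHeuristicResult_t* check) {
  int ids[16];
  int idCount = 0;
  cublasLtMatmulAlgo_t algo;
  cublasLtMatmulAlgoGetIds(ltHandle, CUBLAS_COMPUTE_32F, CUDA_R_32F,
                           CUDA_R_32F, CUDA_R_32F, CUDA_R_32F, CUDA_R_32F,
                           16, ids, &idCount);
  if (idCount <= 0) return CUBLAS_STATUS_NOT_SUPPORTED;
  cublasLtMatmulAlgoInit(ltHandle, CUBLAS_COMPUTE_32F, CUDA_R_32F,
                         CUDA_R_32F, CUDA_R_32F, CUDA_R_32F, CUDA_R_32F,
                         ids[0], &algo);
  /* On success, check->workspaceSize and check->wavesCount describe the run. */
  return cublasLtMatmulAlgoCheck(ltHandle, opDesc, aDesc, bDesc, cDesc, dDesc, &algo, check);
}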
1502
+ /** Capabilities Attributes that can be retrieved from an initialized Algo structure
1503
+ */
1504
+ typedef enum {
1505
+ /** support for split K, see CUBLASLT_ALGO_CONFIG_SPLITK_NUM
1506
+ *
1507
+ * int32_t, 0 means no support, supported otherwise
1508
+ */
1509
+ CUBLASLT_ALGO_CAP_SPLITK_SUPPORT = 0,
1510
+
1511
+ /** reduction scheme mask, see cublasLtReductionScheme_t; shows supported reduction schemes, if reduction scheme is
1512
+ * not masked out it is supported.
1513
+ *
1514
+ * e.g. int isReductionSchemeComputeTypeSupported = ((reductionSchemeMask & CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ==
1515
+ * CUBLASLT_REDUCTION_SCHEME_COMPUTE_TYPE) ? 1 : 0;
1516
+ *
1517
+ * uint32_t
1518
+ */
1519
+ CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK = 1,
1520
+
1521
+ /** support for cta swizzling, see CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING
1522
+ *
1523
+ * uint32_t, 0 means no support, 1 means supported value of 1, other values are reserved
1524
+ */
1525
+ CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT = 2,
1526
+
1527
+ /** support strided batch
1528
+ *
1529
+ * int32_t, 0 means no support, supported otherwise
1530
+ */
1531
+ CUBLASLT_ALGO_CAP_STRIDED_BATCH_SUPPORT = 3,
1532
+
1533
+ /** support results out of place (D != C in D = alpha.A.B + beta.C)
1534
+ *
1535
+ * int32_t, 0 means no support, supported otherwise
1536
+ */
1537
+ CUBLASLT_ALGO_CAP_OUT_OF_PLACE_RESULT_SUPPORT = 4,
1538
+
1539
+ /** syrk/herk support (on top of regular gemm)
1540
+ *
1541
+ * int32_t, 0 means no support, supported otherwise
1542
+ */
1543
+ CUBLASLT_ALGO_CAP_UPLO_SUPPORT = 5,
1544
+
1545
+ /** tile ids possible to use, see cublasLtMatmulTile_t; if no tile ids are supported use
1546
+ * CUBLASLT_MATMUL_TILE_UNDEFINED
1547
+ *
1548
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1549
+ *
1550
+ * array of uint32_t
1551
+ */
1552
+ CUBLASLT_ALGO_CAP_TILE_IDS = 6,
1553
+
1554
+ /** custom option range is from 0 to CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX (inclusive), see
1555
+ * CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION
1556
+ *
1557
+ * int32_t
1558
+ */
1559
+ CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX = 7,
1560
+
1561
+ /** whether algorithm supports custom (not COL or ROW memory order), see cublasLtOrder_t
1562
+ *
1563
+ * int32_t 0 means only COL and ROW memory order is allowed, non-zero means that algo might have different
1564
+ * requirements;
1565
+ */
1566
+ CUBLASLT_ALGO_CAP_CUSTOM_MEMORY_ORDER = 10,
1567
+
1568
+ /** bitmask enumerating pointer modes algorithm supports
1569
+ *
1570
+ * uint32_t, see cublasLtPointerModeMask_t
1571
+ */
1572
+ CUBLASLT_ALGO_CAP_POINTER_MODE_MASK = 11,
1573
+
1574
+ /** bitmask enumerating kinds of postprocessing algorithm supports in the epilogue
1575
+ *
1576
+ * uint32_t, see cublasLtEpilogue_t
1577
+ */
1578
+ CUBLASLT_ALGO_CAP_EPILOGUE_MASK = 12,
1579
+
1580
+ /** stages ids possible to use, see cublasLtMatmulStages_t; if no stages ids are supported use
1581
+ * CUBLASLT_MATMUL_STAGES_UNDEFINED
1582
+ *
1583
+ * use cublasLtMatmulAlgoCapGetAttribute() with sizeInBytes=0 to query actual count
1584
+ *
1585
+ * array of uint32_t
1586
+ */
1587
+ CUBLASLT_ALGO_CAP_STAGES_IDS = 13,
1588
+
1589
+ /** support for negative ld for all of the matrices
1590
+ *
1591
+ * int32_t 0 means no support, supported otherwise
1592
+ */
1593
+ CUBLASLT_ALGO_CAP_LD_NEGATIVE = 14,
1594
+
1595
+ /** details about the algorithm's implementation that affect its numerical behavior
1596
+ *
1597
+ * uint64_t, see cublasLtNumericalImplFlags_t
1598
+ */
1599
+ CUBLASLT_ALGO_CAP_NUMERICAL_IMPL_FLAGS = 15,
1600
+
1601
+ /** minimum alignment required for A matrix in bytes
1602
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1603
+ *
1604
+ * uint32_t
1605
+ */
1606
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_A_BYTES = 16,
1607
+
1608
+ /** minimum alignment required for B matrix in bytes
1609
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1610
+ *
1611
+ * uint32_t
1612
+ */
1613
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_B_BYTES = 17,
1614
+
1615
+ /** minimum alignment required for C matrix in bytes
1616
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1617
+ *
1618
+ * uint32_t
1619
+ */
1620
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_C_BYTES = 18,
1621
+
1622
+ /** minimum alignment required for D matrix in bytes
1623
+ * (required for buffer pointer, leading dimension, and possibly other strides defined for matrix memory order)
1624
+ *
1625
+ * uint32_t
1626
+ */
1627
+ CUBLASLT_ALGO_CAP_MIN_ALIGNMENT_D_BYTES = 19,
1628
+ } cublasLtMatmulAlgoCapAttributes_t;
1629
+
1630
+ /** Get algo capability attribute.
1631
+ *
1632
+ * E.g. to get list of supported Tile IDs:
1633
+ * cublasLtMatmulTile_t tiles[CUBLASLT_MATMUL_TILE_END];
1634
+ * size_t num_tiles, size_written;
1635
+ * if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS, tiles, sizeof(tiles), &size_written) ==
+ *     CUBLAS_STATUS_SUCCESS) {
+ *   num_tiles = size_written / sizeof(tiles[0]);
+ * }
1638
+ *
1639
+ * \param[in] algo The algo descriptor
1640
+ * \param[in] attr The attribute
1641
+ * \param[out] buf memory address where the requested attribute value will be written
1642
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1643
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1644
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1645
+ *
1646
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1647
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1648
+ * selected attribute
1649
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1650
+ */
1651
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoCapGetAttribute(const cublasLtMatmulAlgo_t* algo,
1652
+ cublasLtMatmulAlgoCapAttributes_t attr,
1653
+ void* buf,
1654
+ size_t sizeInBytes,
1655
+ size_t* sizeWritten);
1656
+
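The doc comment above describes a two-step query pattern: call the function with sizeInBytes set to 0 to learn how many bytes are needed, then call it again with a buffer of that size. Below is a minimal sketch of that pattern for CUBLASLT_ALGO_CAP_TILE_IDS; the helper name query_tile_ids, the dynamic allocation, and the minimal error handling are illustrative assumptions, not part of the header.

    #include <stdint.h>
    #include <stdlib.h>
    #include <cublasLt.h>

    /* Sketch only: returns the number of tile ids supported by `algo`, using the
       sizeInBytes == 0 query documented above. `*out_ids` is malloc'd here (a
       hypothetical convenience, not part of the cuBLASLt API) and must be freed
       by the caller. */
    static size_t query_tile_ids(const cublasLtMatmulAlgo_t* algo, uint32_t** out_ids) {
      size_t needed = 0;
      /* first call: sizeInBytes = 0, buf may be NULL, sizeWritten receives the required size */
      if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS,
                                            NULL, 0, &needed) != CUBLAS_STATUS_SUCCESS ||
          needed == 0)
        return 0;
      *out_ids = (uint32_t*)malloc(needed);      /* CUBLASLT_ALGO_CAP_TILE_IDS is an array of uint32_t */
      size_t written = 0;
      if (cublasLtMatmulAlgoCapGetAttribute(algo, CUBLASLT_ALGO_CAP_TILE_IDS,
                                            *out_ids, needed, &written) != CUBLAS_STATUS_SUCCESS) {
        free(*out_ids);
        *out_ids = NULL;
        return 0;
      }
      return written / sizeof(uint32_t);         /* number of valid entries */
    }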
1657
+ /** Algo Configuration Attributes that can be set according to the Algo capabilities
1658
+ */
1659
+ typedef enum {
1660
+ /** algorithm index, see cublasLtMatmulAlgoGetIds()
1661
+ *
1662
+ * readonly, set by cublasLtMatmulAlgoInit()
1663
+ * int32_t
1664
+ */
1665
+ CUBLASLT_ALGO_CONFIG_ID = 0,
1666
+ /** tile id, see cublasLtMatmulTile_t
1667
+ *
1668
+ * uint32_t, default: CUBLASLT_MATMUL_TILE_UNDEFINED
1669
+ */
1670
+ CUBLASLT_ALGO_CONFIG_TILE_ID = 1,
1671
+ /** Number of K splits. If the number of K splits is greater than one, SPLITK_NUM parts
1672
+ * of matrix multiplication will be computed in parallel. The results will be accumulated
1673
+ * according to CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME
1674
+ *
1675
+ * int32_t, default: 1
1676
+ */
1677
+ CUBLASLT_ALGO_CONFIG_SPLITK_NUM = 2,
1678
+ /** reduction scheme, see cublasLtReductionScheme_t
1679
+ *
1680
+ * uint32_t, default: CUBLASLT_REDUCTION_SCHEME_NONE
1681
+ */
1682
+ CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME = 3,
1683
+ /** cta swizzling, changes the mapping from CUDA grid coordinates to parts of the matrices
1684
+ *
1685
+ * possible values: 0, 1, other values reserved
1686
+ *
1687
+ * uint32_t, default: 0
1688
+ */
1689
+ CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING = 4,
1690
+ /** custom option, each algorithm can support some custom options that don't fit the description of the other config
1691
+ * attributes, see CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX to get accepted range for any specific case
1692
+ *
1693
+ * uint32_t, default: 0
1694
+ */
1695
+ CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION = 5,
1696
+ /** stages id, see cublasLtMatmulStages_t
1697
+ *
1698
+ * uint32_t, default: CUBLASLT_MATMUL_STAGES_UNDEFINED
1699
+ */
1700
+ CUBLASLT_ALGO_CONFIG_STAGES_ID = 6,
1701
+ /** inner shape id, see cublasLtMatmulInnerShape_t
1702
+ *
1703
+ * uint16_t, default: 0 (CUBLASLT_MATMUL_INNER_SHAPE_UNDEFINED)
1704
+ */
1705
+ CUBLASLT_ALGO_CONFIG_INNER_SHAPE_ID = 7,
1706
+ /** Thread Block Cluster shape id, see cublasLtClusterShape_t. Defines cluster size to use.
1707
+ *
1708
+ * uint16_t, default: 0 (CUBLASLT_CLUSTER_SHAPE_AUTO)
1709
+ */
1710
+ CUBLASLT_ALGO_CONFIG_CLUSTER_SHAPE_ID = 8,
1711
+ } cublasLtMatmulAlgoConfigAttributes_t;
1712
+
1713
+ /** Set algo configuration attribute.
1714
+ *
1715
+ * \param[in] algo The algo descriptor
1716
+ * \param[in] attr The attribute
1717
+ * \param[in] buf memory address containing the new value
1718
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1719
+ *
1720
+ * \retval CUBLAS_STATUS_INVALID_VALUE if buf is NULL or sizeInBytes doesn't match size of internal storage for
1721
+ * selected attribute
1722
+ * \retval CUBLAS_STATUS_SUCCESS if attribute was set successfully
1723
+ */
1724
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigSetAttribute(cublasLtMatmulAlgo_t* algo,
1725
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1726
+ const void* buf,
1727
+ size_t sizeInBytes);
1728
+
1729
+ /** Get algo configuration attribute.
1730
+ *
1731
+ * \param[in] algo The algo descriptor
1732
+ * \param[in] attr The attribute
1733
+ * \param[out] buf memory address where the requested attribute value will be written
1734
+ * \param[in] sizeInBytes size of buf buffer for verification (in bytes)
1735
+ * \param[out] sizeWritten only valid when return value is CUBLAS_STATUS_SUCCESS. If sizeInBytes is non-zero: number of
1736
+ * bytes actually written, if sizeInBytes is 0: number of bytes needed to write full contents
1737
+ *
1738
+ * \retval CUBLAS_STATUS_INVALID_VALUE if sizeInBytes is 0 and sizeWritten is NULL, or if sizeInBytes is non-zero
1739
+ * and buf is NULL or sizeInBytes doesn't match size of internal storage for
1740
+ * selected attribute
1741
+ * \retval CUBLAS_STATUS_SUCCESS if attribute's value was successfully written to user memory
1742
+ */
1743
+ cublasStatus_t CUBLASWINAPI cublasLtMatmulAlgoConfigGetAttribute(const cublasLtMatmulAlgo_t* algo,
1744
+ cublasLtMatmulAlgoConfigAttributes_t attr,
1745
+ void* buf,
1746
+ size_t sizeInBytes,
1747
+ size_t* sizeWritten);
1748
+
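To connect the capability and configuration halves of the API, here is a hedged sketch of requesting a 4-way split-K with in-place reduction on an algo descriptor. It assumes the descriptor was already filled by cublasLtMatmulAlgoInit(), and that CUBLASLT_REDUCTION_SCHEME_INPLACE (defined earlier in this header) is acceptable to the chosen algorithm, which should be confirmed against its capabilities (e.g. CUBLASLT_ALGO_CAP_SPLITK_SUPPORT) and ultimately with cublasLtMatmulAlgoCheck().

    #include <stdint.h>
    #include <cublasLt.h>

    /* Sketch: set split-K related config attributes on an initialized algo descriptor.
       Return values are ignored here for brevity; real code should check them. */
    static void configure_splitk(cublasLtMatmulAlgo_t* algo) {
      int32_t splitk = 4;                                   /* CUBLASLT_ALGO_CONFIG_SPLITK_NUM is int32_t */
      uint32_t scheme = CUBLASLT_REDUCTION_SCHEME_INPLACE;  /* CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME is uint32_t */
      cublasLtMatmulAlgoConfigSetAttribute(algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM,
                                           &splitk, sizeof(splitk));
      cublasLtMatmulAlgoConfigSetAttribute(algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME,
                                           &scheme, sizeof(scheme));
    }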
1749
+ /** Experimental: Logger callback type.
1750
+ */
1751
+ typedef void (*cublasLtLoggerCallback_t)(int logLevel, const char* functionName, const char* message);
1752
+
1753
+ /** Experimental: Logger callback setter.
1754
+ *
1755
+ * \param[in] callback a user defined callback function to be called by the logger
1756
+ *
1757
+ * \retval CUBLAS_STATUS_SUCCESS if callback was set successfully
1758
+ */
1759
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetCallback(cublasLtLoggerCallback_t callback);
1760
+
1761
+ /** Experimental: Log file setter.
1762
+ *
1763
+ * \param[in] file an open file with write permissions
1764
+ *
1765
+ * \retval CUBLAS_STATUS_SUCCESS if log file was set successfully
1766
+ */
1767
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetFile(FILE* file);
1768
+
1769
+ /** Experimental: Open log file.
1770
+ *
1771
+ * \param[in] logFile log file path. If the log file does not exist, it will be created
1772
+ *
1773
+ * \retval CUBLAS_STATUS_SUCCESS if log file was created successfully
1774
+ */
1775
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerOpenFile(const char* logFile);
1776
+
1777
+ /** Experimental: Log level setter.
1778
+ *
1779
+ * \param[in] level log level, should be one of the following:
1780
+ * 0. Off
1781
+ * 1. Errors
1782
+ * 2. Performance Trace
1783
+ * 3. Performance Hints
1784
+ * 4. Heuristics Trace
1785
+ * 5. API Trace
1786
+ *
1787
+ * \retval CUBLAS_STATUS_INVALID_VALUE if log level is not one of the above levels
1788
+ *
1789
+ * \retval CUBLAS_STATUS_SUCCESS if log level was set successfully
1790
+ */
1791
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetLevel(int level);
1792
+
1793
+ /** Experimental: Log mask setter.
1794
+ *
1795
+ * \param[in] mask log mask, should be a combination of the following masks:
1796
+ * 0. Off
1797
+ * 1. Errors
1798
+ * 2. Performance Trace
1799
+ * 4. Performance Hints
1800
+ * 8. Heuristics Trace
1801
+ * 16. API Trace
1802
+ *
1803
+ * \retval CUBLAS_STATUS_SUCCESS if log mask was set successfully
1804
+ */
1805
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerSetMask(int mask);
1806
+
1807
+ /** Experimental: Disable logging for the entire session.
1808
+ *
1809
+ * \retval CUBLAS_STATUS_SUCCESS if logging was successfully disabled
1810
+ */
1811
+ cublasStatus_t CUBLASWINAPI cublasLtLoggerForceDisable();
1812
+
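A small sketch of wiring up the experimental logger declared above: my_lt_logger is a hypothetical user callback matching cublasLtLoggerCallback_t, and level 3 corresponds to Performance Hints in the level list documented for cublasLtLoggerSetLevel().

    #include <stdio.h>
    #include <cublasLt.h>

    /* Hypothetical callback: print every log record to stderr. */
    static void my_lt_logger(int logLevel, const char* functionName, const char* message) {
      fprintf(stderr, "[cublasLt:%d] %s: %s\n", logLevel, functionName, message);
    }

    static void enable_lt_logging(void) {
      cublasLtLoggerSetCallback(my_lt_logger);
      cublasLtLoggerSetLevel(3);   /* 3 = Performance Hints, per the list above */
    }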
1813
+ #if defined(__cplusplus)
1814
+ }
1815
+ #endif /* __cplusplus */
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublasXt.h ADDED
@@ -0,0 +1,693 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cublasXt : Host API, Out of Core and Multi-GPU BLAS Library
51
+
52
+ */
53
+
54
+ #if !defined(CUBLAS_XT_H_)
55
+ #define CUBLAS_XT_H_
56
+
57
+ #include "driver_types.h"
58
+ #include "cuComplex.h" /* import complex data type */
59
+
60
+ #include "cublas_v2.h"
61
+
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif /* __cplusplus */
65
+
66
+ struct cublasXtContext;
67
+ typedef struct cublasXtContext* cublasXtHandle_t;
68
+
69
+ cublasStatus_t CUBLASWINAPI cublasXtCreate(cublasXtHandle_t* handle);
70
+ cublasStatus_t CUBLASWINAPI cublasXtDestroy(cublasXtHandle_t handle);
71
+ cublasStatus_t CUBLASWINAPI cublasXtGetNumBoards(int nbDevices, int deviceId[], int* nbBoards);
72
+ cublasStatus_t CUBLASWINAPI cublasXtMaxBoards(int* nbGpuBoards);
73
+ /* This routine selects the GPUs that the user wants to use for CUBLAS-XT */
74
+ cublasStatus_t CUBLASWINAPI cublasXtDeviceSelect(cublasXtHandle_t handle, int nbDevices, int deviceId[]);
75
+
76
+ /* This routine allows the user to change the dimension of the tiles (blockDim x blockDim) */
77
+ cublasStatus_t CUBLASWINAPI cublasXtSetBlockDim(cublasXtHandle_t handle, int blockDim);
78
+ cublasStatus_t CUBLASWINAPI cublasXtGetBlockDim(cublasXtHandle_t handle, int* blockDim);
79
+
80
+ typedef enum { CUBLASXT_PINNING_DISABLED = 0, CUBLASXT_PINNING_ENABLED = 1 } cublasXtPinnedMemMode_t;
81
+ /* This routine allows CUBLAS-XT to pin the host memory if it finds out that some of the matrices passed
+ are not pinned: pinning/unpinning the host memory is still a costly operation.
+ It is better if the user controls the memory on their own (by pinning/unpinning only when necessary)
84
+ */
85
+ cublasStatus_t CUBLASWINAPI cublasXtGetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t* mode);
86
+ cublasStatus_t CUBLASWINAPI cublasXtSetPinningMemMode(cublasXtHandle_t handle, cublasXtPinnedMemMode_t mode);
87
+
88
+ /* These routines provide CPU BLAS routines, used for sizes that are too small or for hybrid computation */
89
+ typedef enum {
90
+ CUBLASXT_FLOAT = 0,
91
+ CUBLASXT_DOUBLE = 1,
92
+ CUBLASXT_COMPLEX = 2,
93
+ CUBLASXT_DOUBLECOMPLEX = 3,
94
+ } cublasXtOpType_t;
95
+
96
+ typedef enum {
97
+ CUBLASXT_GEMM = 0,
98
+ CUBLASXT_SYRK = 1,
99
+ CUBLASXT_HERK = 2,
100
+ CUBLASXT_SYMM = 3,
101
+ CUBLASXT_HEMM = 4,
102
+ CUBLASXT_TRSM = 5,
103
+ CUBLASXT_SYR2K = 6,
104
+ CUBLASXT_HER2K = 7,
105
+
106
+ CUBLASXT_SPMM = 8,
107
+ CUBLASXT_SYRKX = 9,
108
+ CUBLASXT_HERKX = 10,
109
+ CUBLASXT_TRMM = 11,
110
+ CUBLASXT_ROUTINE_MAX = 12,
111
+ } cublasXtBlasOp_t;
112
+
113
+ /* Currently only 32-bit integer BLAS routines are supported */
114
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRoutine(cublasXtHandle_t handle,
115
+ cublasXtBlasOp_t blasOp,
116
+ cublasXtOpType_t type,
117
+ void* blasFunctor);
118
+
119
+ /* Specifies the percentage of work that should be done by the CPU; default is 0 (no work) */
120
+ cublasStatus_t CUBLASWINAPI cublasXtSetCpuRatio(cublasXtHandle_t handle,
121
+ cublasXtBlasOp_t blasOp,
122
+ cublasXtOpType_t type,
123
+ float ratio);
124
+
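As a hedged illustration of the two routines above, the sketch below registers a host DGEMM as the CPU fallback and asks cublasXt to run 20% of DGEMM work on the CPU. It assumes the handle was created with cublasXtCreate() and that a host BLAS providing the standard Fortran dgemm_ symbol is linked in; which functor signatures are accepted is up to the library, so treat this purely as a sketch.

    #include <cublasXt.h>

    /* Host BLAS DGEMM symbol with the standard Fortran interface (assumed to be
       provided by a linked CPU BLAS). */
    extern void dgemm_(const char*, const char*, const int*, const int*, const int*,
                       const double*, const double*, const int*, const double*, const int*,
                       const double*, double*, const int*);

    static void set_cpu_fallback(cublasXtHandle_t xt) {
      cublasXtSetCpuRoutine(xt, CUBLASXT_GEMM, CUBLASXT_DOUBLE, (void*)dgemm_);  /* CPU routine for DGEMM */
      cublasXtSetCpuRatio(xt, CUBLASXT_GEMM, CUBLASXT_DOUBLE, 0.20f);            /* 20% of the work on the CPU */
    }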
125
+ /* GEMM */
126
+ cublasStatus_t CUBLASWINAPI cublasXtSgemm(cublasXtHandle_t handle,
127
+ cublasOperation_t transa,
128
+ cublasOperation_t transb,
129
+ size_t m,
130
+ size_t n,
131
+ size_t k,
132
+ const float* alpha,
133
+ const float* A,
134
+ size_t lda,
135
+ const float* B,
136
+ size_t ldb,
137
+ const float* beta,
138
+ float* C,
139
+ size_t ldc);
140
+
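Since cublasXt takes host pointers directly, a complete call sequence is short. The sketch below creates a handle, restricts it to GPU 0, and runs the single-precision GEMM declared above; buffer allocation, fill, and error checking are omitted, and the block dimension is an illustrative choice.

    #include <cublasXt.h>

    /* Sketch: C = A * B with host-resident, column-major matrices. */
    static void xt_sgemm_example(const float* A, const float* B, float* C,
                                 size_t m, size_t n, size_t k) {
      cublasXtHandle_t xt;
      int devices[1] = {0};                    /* use GPU 0 only */
      float alpha = 1.0f, beta = 0.0f;

      cublasXtCreate(&xt);
      cublasXtDeviceSelect(xt, 1, devices);
      cublasXtSetBlockDim(xt, 1024);           /* optional: tile dimension used for out-of-core blocking */
      cublasXtSgemm(xt, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                    &alpha, A, m, B, k, &beta, C, m);
      cublasXtDestroy(xt);
    }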
141
+ cublasStatus_t CUBLASWINAPI cublasXtDgemm(cublasXtHandle_t handle,
142
+ cublasOperation_t transa,
143
+ cublasOperation_t transb,
144
+ size_t m,
145
+ size_t n,
146
+ size_t k,
147
+ const double* alpha,
148
+ const double* A,
149
+ size_t lda,
150
+ const double* B,
151
+ size_t ldb,
152
+ const double* beta,
153
+ double* C,
154
+ size_t ldc);
155
+
156
+ cublasStatus_t CUBLASWINAPI cublasXtCgemm(cublasXtHandle_t handle,
157
+ cublasOperation_t transa,
158
+ cublasOperation_t transb,
159
+ size_t m,
160
+ size_t n,
161
+ size_t k,
162
+ const cuComplex* alpha,
163
+ const cuComplex* A,
164
+ size_t lda,
165
+ const cuComplex* B,
166
+ size_t ldb,
167
+ const cuComplex* beta,
168
+ cuComplex* C,
169
+ size_t ldc);
170
+
171
+ cublasStatus_t CUBLASWINAPI cublasXtZgemm(cublasXtHandle_t handle,
172
+ cublasOperation_t transa,
173
+ cublasOperation_t transb,
174
+ size_t m,
175
+ size_t n,
176
+ size_t k,
177
+ const cuDoubleComplex* alpha,
178
+ const cuDoubleComplex* A,
179
+ size_t lda,
180
+ const cuDoubleComplex* B,
181
+ size_t ldb,
182
+ const cuDoubleComplex* beta,
183
+ cuDoubleComplex* C,
184
+ size_t ldc);
185
+ /* ------------------------------------------------------- */
186
+ /* SYRK */
187
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrk(cublasXtHandle_t handle,
188
+ cublasFillMode_t uplo,
189
+ cublasOperation_t trans,
190
+ size_t n,
191
+ size_t k,
192
+ const float* alpha,
193
+ const float* A,
194
+ size_t lda,
195
+ const float* beta,
196
+ float* C,
197
+ size_t ldc);
198
+
199
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrk(cublasXtHandle_t handle,
200
+ cublasFillMode_t uplo,
201
+ cublasOperation_t trans,
202
+ size_t n,
203
+ size_t k,
204
+ const double* alpha,
205
+ const double* A,
206
+ size_t lda,
207
+ const double* beta,
208
+ double* C,
209
+ size_t ldc);
210
+
211
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrk(cublasXtHandle_t handle,
212
+ cublasFillMode_t uplo,
213
+ cublasOperation_t trans,
214
+ size_t n,
215
+ size_t k,
216
+ const cuComplex* alpha,
217
+ const cuComplex* A,
218
+ size_t lda,
219
+ const cuComplex* beta,
220
+ cuComplex* C,
221
+ size_t ldc);
222
+
223
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrk(cublasXtHandle_t handle,
224
+ cublasFillMode_t uplo,
225
+ cublasOperation_t trans,
226
+ size_t n,
227
+ size_t k,
228
+ const cuDoubleComplex* alpha,
229
+ const cuDoubleComplex* A,
230
+ size_t lda,
231
+ const cuDoubleComplex* beta,
232
+ cuDoubleComplex* C,
233
+ size_t ldc);
234
+ /* -------------------------------------------------------------------- */
235
+ /* HERK */
236
+ cublasStatus_t CUBLASWINAPI cublasXtCherk(cublasXtHandle_t handle,
237
+ cublasFillMode_t uplo,
238
+ cublasOperation_t trans,
239
+ size_t n,
240
+ size_t k,
241
+ const float* alpha,
242
+ const cuComplex* A,
243
+ size_t lda,
244
+ const float* beta,
245
+ cuComplex* C,
246
+ size_t ldc);
247
+
248
+ cublasStatus_t CUBLASWINAPI cublasXtZherk(cublasXtHandle_t handle,
249
+ cublasFillMode_t uplo,
250
+ cublasOperation_t trans,
251
+ size_t n,
252
+ size_t k,
253
+ const double* alpha,
254
+ const cuDoubleComplex* A,
255
+ size_t lda,
256
+ const double* beta,
257
+ cuDoubleComplex* C,
258
+ size_t ldc);
259
+ /* -------------------------------------------------------------------- */
260
+ /* SYR2K */
261
+ cublasStatus_t CUBLASWINAPI cublasXtSsyr2k(cublasXtHandle_t handle,
262
+ cublasFillMode_t uplo,
263
+ cublasOperation_t trans,
264
+ size_t n,
265
+ size_t k,
266
+ const float* alpha,
267
+ const float* A,
268
+ size_t lda,
269
+ const float* B,
270
+ size_t ldb,
271
+ const float* beta,
272
+ float* C,
273
+ size_t ldc);
274
+
275
+ cublasStatus_t CUBLASWINAPI cublasXtDsyr2k(cublasXtHandle_t handle,
276
+ cublasFillMode_t uplo,
277
+ cublasOperation_t trans,
278
+ size_t n,
279
+ size_t k,
280
+ const double* alpha,
281
+ const double* A,
282
+ size_t lda,
283
+ const double* B,
284
+ size_t ldb,
285
+ const double* beta,
286
+ double* C,
287
+ size_t ldc);
288
+
289
+ cublasStatus_t CUBLASWINAPI cublasXtCsyr2k(cublasXtHandle_t handle,
290
+ cublasFillMode_t uplo,
291
+ cublasOperation_t trans,
292
+ size_t n,
293
+ size_t k,
294
+ const cuComplex* alpha,
295
+ const cuComplex* A,
296
+ size_t lda,
297
+ const cuComplex* B,
298
+ size_t ldb,
299
+ const cuComplex* beta,
300
+ cuComplex* C,
301
+ size_t ldc);
302
+
303
+ cublasStatus_t CUBLASWINAPI cublasXtZsyr2k(cublasXtHandle_t handle,
304
+ cublasFillMode_t uplo,
305
+ cublasOperation_t trans,
306
+ size_t n,
307
+ size_t k,
308
+ const cuDoubleComplex* alpha,
309
+ const cuDoubleComplex* A,
310
+ size_t lda,
311
+ const cuDoubleComplex* B,
312
+ size_t ldb,
313
+ const cuDoubleComplex* beta,
314
+ cuDoubleComplex* C,
315
+ size_t ldc);
316
+ /* -------------------------------------------------------------------- */
317
+ /* HERKX : variant extension of HERK */
318
+ cublasStatus_t CUBLASWINAPI cublasXtCherkx(cublasXtHandle_t handle,
319
+ cublasFillMode_t uplo,
320
+ cublasOperation_t trans,
321
+ size_t n,
322
+ size_t k,
323
+ const cuComplex* alpha,
324
+ const cuComplex* A,
325
+ size_t lda,
326
+ const cuComplex* B,
327
+ size_t ldb,
328
+ const float* beta,
329
+ cuComplex* C,
330
+ size_t ldc);
331
+
332
+ cublasStatus_t CUBLASWINAPI cublasXtZherkx(cublasXtHandle_t handle,
333
+ cublasFillMode_t uplo,
334
+ cublasOperation_t trans,
335
+ size_t n,
336
+ size_t k,
337
+ const cuDoubleComplex* alpha,
338
+ const cuDoubleComplex* A,
339
+ size_t lda,
340
+ const cuDoubleComplex* B,
341
+ size_t ldb,
342
+ const double* beta,
343
+ cuDoubleComplex* C,
344
+ size_t ldc);
345
+
346
+ /* -------------------------------------------------------------------- */
347
+ /* TRSM */
348
+ cublasStatus_t CUBLASWINAPI cublasXtStrsm(cublasXtHandle_t handle,
349
+ cublasSideMode_t side,
350
+ cublasFillMode_t uplo,
351
+ cublasOperation_t trans,
352
+ cublasDiagType_t diag,
353
+ size_t m,
354
+ size_t n,
355
+ const float* alpha,
356
+ const float* A,
357
+ size_t lda,
358
+ float* B,
359
+ size_t ldb);
360
+
361
+ cublasStatus_t CUBLASWINAPI cublasXtDtrsm(cublasXtHandle_t handle,
362
+ cublasSideMode_t side,
363
+ cublasFillMode_t uplo,
364
+ cublasOperation_t trans,
365
+ cublasDiagType_t diag,
366
+ size_t m,
367
+ size_t n,
368
+ const double* alpha,
369
+ const double* A,
370
+ size_t lda,
371
+ double* B,
372
+ size_t ldb);
373
+
374
+ cublasStatus_t CUBLASWINAPI cublasXtCtrsm(cublasXtHandle_t handle,
375
+ cublasSideMode_t side,
376
+ cublasFillMode_t uplo,
377
+ cublasOperation_t trans,
378
+ cublasDiagType_t diag,
379
+ size_t m,
380
+ size_t n,
381
+ const cuComplex* alpha,
382
+ const cuComplex* A,
383
+ size_t lda,
384
+ cuComplex* B,
385
+ size_t ldb);
386
+
387
+ cublasStatus_t CUBLASWINAPI cublasXtZtrsm(cublasXtHandle_t handle,
388
+ cublasSideMode_t side,
389
+ cublasFillMode_t uplo,
390
+ cublasOperation_t trans,
391
+ cublasDiagType_t diag,
392
+ size_t m,
393
+ size_t n,
394
+ const cuDoubleComplex* alpha,
395
+ const cuDoubleComplex* A,
396
+ size_t lda,
397
+ cuDoubleComplex* B,
398
+ size_t ldb);
399
+ /* -------------------------------------------------------------------- */
400
+ /* SYMM : Symmetric Matrix Multiply */
401
+ cublasStatus_t CUBLASWINAPI cublasXtSsymm(cublasXtHandle_t handle,
402
+ cublasSideMode_t side,
403
+ cublasFillMode_t uplo,
404
+ size_t m,
405
+ size_t n,
406
+ const float* alpha,
407
+ const float* A,
408
+ size_t lda,
409
+ const float* B,
410
+ size_t ldb,
411
+ const float* beta,
412
+ float* C,
413
+ size_t ldc);
414
+
415
+ cublasStatus_t CUBLASWINAPI cublasXtDsymm(cublasXtHandle_t handle,
416
+ cublasSideMode_t side,
417
+ cublasFillMode_t uplo,
418
+ size_t m,
419
+ size_t n,
420
+ const double* alpha,
421
+ const double* A,
422
+ size_t lda,
423
+ const double* B,
424
+ size_t ldb,
425
+ const double* beta,
426
+ double* C,
427
+ size_t ldc);
428
+
429
+ cublasStatus_t CUBLASWINAPI cublasXtCsymm(cublasXtHandle_t handle,
430
+ cublasSideMode_t side,
431
+ cublasFillMode_t uplo,
432
+ size_t m,
433
+ size_t n,
434
+ const cuComplex* alpha,
435
+ const cuComplex* A,
436
+ size_t lda,
437
+ const cuComplex* B,
438
+ size_t ldb,
439
+ const cuComplex* beta,
440
+ cuComplex* C,
441
+ size_t ldc);
442
+
443
+ cublasStatus_t CUBLASWINAPI cublasXtZsymm(cublasXtHandle_t handle,
444
+ cublasSideMode_t side,
445
+ cublasFillMode_t uplo,
446
+ size_t m,
447
+ size_t n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* A,
450
+ size_t lda,
451
+ const cuDoubleComplex* B,
452
+ size_t ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* C,
455
+ size_t ldc);
456
+ /* -------------------------------------------------------------------- */
457
+ /* HEMM : Hermitian Matrix Multiply */
458
+ cublasStatus_t CUBLASWINAPI cublasXtChemm(cublasXtHandle_t handle,
459
+ cublasSideMode_t side,
460
+ cublasFillMode_t uplo,
461
+ size_t m,
462
+ size_t n,
463
+ const cuComplex* alpha,
464
+ const cuComplex* A,
465
+ size_t lda,
466
+ const cuComplex* B,
467
+ size_t ldb,
468
+ const cuComplex* beta,
469
+ cuComplex* C,
470
+ size_t ldc);
471
+
472
+ cublasStatus_t CUBLASWINAPI cublasXtZhemm(cublasXtHandle_t handle,
473
+ cublasSideMode_t side,
474
+ cublasFillMode_t uplo,
475
+ size_t m,
476
+ size_t n,
477
+ const cuDoubleComplex* alpha,
478
+ const cuDoubleComplex* A,
479
+ size_t lda,
480
+ const cuDoubleComplex* B,
481
+ size_t ldb,
482
+ const cuDoubleComplex* beta,
483
+ cuDoubleComplex* C,
484
+ size_t ldc);
485
+
486
+ /* -------------------------------------------------------------------- */
487
+ /* SYRKX : variant extension of SYRK */
488
+ cublasStatus_t CUBLASWINAPI cublasXtSsyrkx(cublasXtHandle_t handle,
489
+ cublasFillMode_t uplo,
490
+ cublasOperation_t trans,
491
+ size_t n,
492
+ size_t k,
493
+ const float* alpha,
494
+ const float* A,
495
+ size_t lda,
496
+ const float* B,
497
+ size_t ldb,
498
+ const float* beta,
499
+ float* C,
500
+ size_t ldc);
501
+
502
+ cublasStatus_t CUBLASWINAPI cublasXtDsyrkx(cublasXtHandle_t handle,
503
+ cublasFillMode_t uplo,
504
+ cublasOperation_t trans,
505
+ size_t n,
506
+ size_t k,
507
+ const double* alpha,
508
+ const double* A,
509
+ size_t lda,
510
+ const double* B,
511
+ size_t ldb,
512
+ const double* beta,
513
+ double* C,
514
+ size_t ldc);
515
+
516
+ cublasStatus_t CUBLASWINAPI cublasXtCsyrkx(cublasXtHandle_t handle,
517
+ cublasFillMode_t uplo,
518
+ cublasOperation_t trans,
519
+ size_t n,
520
+ size_t k,
521
+ const cuComplex* alpha,
522
+ const cuComplex* A,
523
+ size_t lda,
524
+ const cuComplex* B,
525
+ size_t ldb,
526
+ const cuComplex* beta,
527
+ cuComplex* C,
528
+ size_t ldc);
529
+
530
+ cublasStatus_t CUBLASWINAPI cublasXtZsyrkx(cublasXtHandle_t handle,
531
+ cublasFillMode_t uplo,
532
+ cublasOperation_t trans,
533
+ size_t n,
534
+ size_t k,
535
+ const cuDoubleComplex* alpha,
536
+ const cuDoubleComplex* A,
537
+ size_t lda,
538
+ const cuDoubleComplex* B,
539
+ size_t ldb,
540
+ const cuDoubleComplex* beta,
541
+ cuDoubleComplex* C,
542
+ size_t ldc);
543
+ /* -------------------------------------------------------------------- */
544
+ /* HER2K : variant extension of HERK */
545
+ cublasStatus_t CUBLASWINAPI cublasXtCher2k(cublasXtHandle_t handle,
546
+ cublasFillMode_t uplo,
547
+ cublasOperation_t trans,
548
+ size_t n,
549
+ size_t k,
550
+ const cuComplex* alpha,
551
+ const cuComplex* A,
552
+ size_t lda,
553
+ const cuComplex* B,
554
+ size_t ldb,
555
+ const float* beta,
556
+ cuComplex* C,
557
+ size_t ldc);
558
+
559
+ cublasStatus_t CUBLASWINAPI cublasXtZher2k(cublasXtHandle_t handle,
560
+ cublasFillMode_t uplo,
561
+ cublasOperation_t trans,
562
+ size_t n,
563
+ size_t k,
564
+ const cuDoubleComplex* alpha,
565
+ const cuDoubleComplex* A,
566
+ size_t lda,
567
+ const cuDoubleComplex* B,
568
+ size_t ldb,
569
+ const double* beta,
570
+ cuDoubleComplex* C,
571
+ size_t ldc);
572
+
573
+ /* -------------------------------------------------------------------- */
574
+ /* SPMM : Symmetric Packed Matrix Multiply */
575
+ cublasStatus_t CUBLASWINAPI cublasXtSspmm(cublasXtHandle_t handle,
576
+ cublasSideMode_t side,
577
+ cublasFillMode_t uplo,
578
+ size_t m,
579
+ size_t n,
580
+ const float* alpha,
581
+ const float* AP,
582
+ const float* B,
583
+ size_t ldb,
584
+ const float* beta,
585
+ float* C,
586
+ size_t ldc);
587
+
588
+ cublasStatus_t CUBLASWINAPI cublasXtDspmm(cublasXtHandle_t handle,
589
+ cublasSideMode_t side,
590
+ cublasFillMode_t uplo,
591
+ size_t m,
592
+ size_t n,
593
+ const double* alpha,
594
+ const double* AP,
595
+ const double* B,
596
+ size_t ldb,
597
+ const double* beta,
598
+ double* C,
599
+ size_t ldc);
600
+
601
+ cublasStatus_t CUBLASWINAPI cublasXtCspmm(cublasXtHandle_t handle,
602
+ cublasSideMode_t side,
603
+ cublasFillMode_t uplo,
604
+ size_t m,
605
+ size_t n,
606
+ const cuComplex* alpha,
607
+ const cuComplex* AP,
608
+ const cuComplex* B,
609
+ size_t ldb,
610
+ const cuComplex* beta,
611
+ cuComplex* C,
612
+ size_t ldc);
613
+
614
+ cublasStatus_t CUBLASWINAPI cublasXtZspmm(cublasXtHandle_t handle,
615
+ cublasSideMode_t side,
616
+ cublasFillMode_t uplo,
617
+ size_t m,
618
+ size_t n,
619
+ const cuDoubleComplex* alpha,
620
+ const cuDoubleComplex* AP,
621
+ const cuDoubleComplex* B,
622
+ size_t ldb,
623
+ const cuDoubleComplex* beta,
624
+ cuDoubleComplex* C,
625
+ size_t ldc);
626
+
627
+ /* -------------------------------------------------------------------- */
628
+ /* TRMM */
629
+ cublasStatus_t CUBLASWINAPI cublasXtStrmm(cublasXtHandle_t handle,
630
+ cublasSideMode_t side,
631
+ cublasFillMode_t uplo,
632
+ cublasOperation_t trans,
633
+ cublasDiagType_t diag,
634
+ size_t m,
635
+ size_t n,
636
+ const float* alpha,
637
+ const float* A,
638
+ size_t lda,
639
+ const float* B,
640
+ size_t ldb,
641
+ float* C,
642
+ size_t ldc);
643
+
644
+ cublasStatus_t CUBLASWINAPI cublasXtDtrmm(cublasXtHandle_t handle,
645
+ cublasSideMode_t side,
646
+ cublasFillMode_t uplo,
647
+ cublasOperation_t trans,
648
+ cublasDiagType_t diag,
649
+ size_t m,
650
+ size_t n,
651
+ const double* alpha,
652
+ const double* A,
653
+ size_t lda,
654
+ const double* B,
655
+ size_t ldb,
656
+ double* C,
657
+ size_t ldc);
658
+
659
+ cublasStatus_t CUBLASWINAPI cublasXtCtrmm(cublasXtHandle_t handle,
660
+ cublasSideMode_t side,
661
+ cublasFillMode_t uplo,
662
+ cublasOperation_t trans,
663
+ cublasDiagType_t diag,
664
+ size_t m,
665
+ size_t n,
666
+ const cuComplex* alpha,
667
+ const cuComplex* A,
668
+ size_t lda,
669
+ const cuComplex* B,
670
+ size_t ldb,
671
+ cuComplex* C,
672
+ size_t ldc);
673
+
674
+ cublasStatus_t CUBLASWINAPI cublasXtZtrmm(cublasXtHandle_t handle,
675
+ cublasSideMode_t side,
676
+ cublasFillMode_t uplo,
677
+ cublasOperation_t trans,
678
+ cublasDiagType_t diag,
679
+ size_t m,
680
+ size_t n,
681
+ const cuDoubleComplex* alpha,
682
+ const cuDoubleComplex* A,
683
+ size_t lda,
684
+ const cuDoubleComplex* B,
685
+ size_t ldb,
686
+ cuDoubleComplex* C,
687
+ size_t ldc);
688
+
689
+ #if defined(__cplusplus)
690
+ }
691
+ #endif /* __cplusplus */
692
+
693
+ #endif /* !defined(CUBLAS_XT_H_) */
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublas_api.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/cublas_v2.h ADDED
@@ -0,0 +1,478 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * This is the public header file for the new CUBLAS library API; it maps the generic
52
+ * cuBLAS function names to the actual _v2 implementations.
53
+ */
54
+
55
+ #if !defined(CUBLAS_V2_H_)
56
+ #define CUBLAS_V2_H_
57
+
58
+ #if defined(CUBLAS_H_)
59
+ #error "It is an error to include both cublas.h and cublas_v2.h"
60
+ #endif
61
+
62
+ #undef CUBLASAPI
63
+ #ifdef __CUDACC__
64
+ #define CUBLASAPI __host__ __device__
65
+ #else
66
+ #define CUBLASAPI
67
+ #endif
68
+
69
+ #include "cublas_api.h"
70
+
71
+ #define cublasCreate cublasCreate_v2
72
+ #define cublasDestroy cublasDestroy_v2
73
+ #define cublasGetVersion cublasGetVersion_v2
74
+ #define cublasSetWorkspace cublasSetWorkspace_v2
75
+ #define cublasSetStream cublasSetStream_v2
76
+ #define cublasGetStream cublasGetStream_v2
77
+ #define cublasGetPointerMode cublasGetPointerMode_v2
78
+ #define cublasSetPointerMode cublasSetPointerMode_v2
79
+
80
+ /* 32-bit integer */
81
+
82
+ /* Blas1 Routines */
83
+
84
+ #define cublasSnrm2 cublasSnrm2_v2
85
+ #define cublasDnrm2 cublasDnrm2_v2
86
+ #define cublasScnrm2 cublasScnrm2_v2
87
+ #define cublasDznrm2 cublasDznrm2_v2
88
+
89
+ #define cublasSdot cublasSdot_v2
90
+ #define cublasDdot cublasDdot_v2
91
+ #define cublasCdotu cublasCdotu_v2
92
+ #define cublasCdotc cublasCdotc_v2
93
+ #define cublasZdotu cublasZdotu_v2
94
+ #define cublasZdotc cublasZdotc_v2
95
+
96
+ #define cublasSscal cublasSscal_v2
97
+ #define cublasDscal cublasDscal_v2
98
+ #define cublasCscal cublasCscal_v2
99
+ #define cublasCsscal cublasCsscal_v2
100
+ #define cublasZscal cublasZscal_v2
101
+ #define cublasZdscal cublasZdscal_v2
102
+
103
+ #define cublasSaxpy cublasSaxpy_v2
104
+ #define cublasDaxpy cublasDaxpy_v2
105
+ #define cublasCaxpy cublasCaxpy_v2
106
+ #define cublasZaxpy cublasZaxpy_v2
107
+
108
+ #define cublasScopy cublasScopy_v2
109
+ #define cublasDcopy cublasDcopy_v2
110
+ #define cublasCcopy cublasCcopy_v2
111
+ #define cublasZcopy cublasZcopy_v2
112
+
113
+ #define cublasSswap cublasSswap_v2
114
+ #define cublasDswap cublasDswap_v2
115
+ #define cublasCswap cublasCswap_v2
116
+ #define cublasZswap cublasZswap_v2
117
+
118
+ #define cublasIsamax cublasIsamax_v2
119
+ #define cublasIdamax cublasIdamax_v2
120
+ #define cublasIcamax cublasIcamax_v2
121
+ #define cublasIzamax cublasIzamax_v2
122
+
123
+ #define cublasIsamin cublasIsamin_v2
124
+ #define cublasIdamin cublasIdamin_v2
125
+ #define cublasIcamin cublasIcamin_v2
126
+ #define cublasIzamin cublasIzamin_v2
127
+
128
+ #define cublasSasum cublasSasum_v2
129
+ #define cublasDasum cublasDasum_v2
130
+ #define cublasScasum cublasScasum_v2
131
+ #define cublasDzasum cublasDzasum_v2
132
+
133
+ #define cublasSrot cublasSrot_v2
134
+ #define cublasDrot cublasDrot_v2
135
+ #define cublasCrot cublasCrot_v2
136
+ #define cublasCsrot cublasCsrot_v2
137
+ #define cublasZrot cublasZrot_v2
138
+ #define cublasZdrot cublasZdrot_v2
139
+
140
+ #define cublasSrotg cublasSrotg_v2
141
+ #define cublasDrotg cublasDrotg_v2
142
+ #define cublasCrotg cublasCrotg_v2
143
+ #define cublasZrotg cublasZrotg_v2
144
+
145
+ #define cublasSrotm cublasSrotm_v2
146
+ #define cublasDrotm cublasDrotm_v2
147
+
148
+ #define cublasSrotmg cublasSrotmg_v2
149
+ #define cublasDrotmg cublasDrotmg_v2
150
+
151
+ /* Blas2 Routines */
152
+
153
+ #define cublasSgemv cublasSgemv_v2
154
+ #define cublasDgemv cublasDgemv_v2
155
+ #define cublasCgemv cublasCgemv_v2
156
+ #define cublasZgemv cublasZgemv_v2
157
+
158
+ #define cublasSgbmv cublasSgbmv_v2
159
+ #define cublasDgbmv cublasDgbmv_v2
160
+ #define cublasCgbmv cublasCgbmv_v2
161
+ #define cublasZgbmv cublasZgbmv_v2
162
+
163
+ #define cublasStrmv cublasStrmv_v2
164
+ #define cublasDtrmv cublasDtrmv_v2
165
+ #define cublasCtrmv cublasCtrmv_v2
166
+ #define cublasZtrmv cublasZtrmv_v2
167
+
168
+ #define cublasStbmv cublasStbmv_v2
169
+ #define cublasDtbmv cublasDtbmv_v2
170
+ #define cublasCtbmv cublasCtbmv_v2
171
+ #define cublasZtbmv cublasZtbmv_v2
172
+
173
+ #define cublasStpmv cublasStpmv_v2
174
+ #define cublasDtpmv cublasDtpmv_v2
175
+ #define cublasCtpmv cublasCtpmv_v2
176
+ #define cublasZtpmv cublasZtpmv_v2
177
+
178
+ #define cublasStrsv cublasStrsv_v2
179
+ #define cublasDtrsv cublasDtrsv_v2
180
+ #define cublasCtrsv cublasCtrsv_v2
181
+ #define cublasZtrsv cublasZtrsv_v2
182
+
183
+ #define cublasStpsv cublasStpsv_v2
184
+ #define cublasDtpsv cublasDtpsv_v2
185
+ #define cublasCtpsv cublasCtpsv_v2
186
+ #define cublasZtpsv cublasZtpsv_v2
187
+
188
+ #define cublasStbsv cublasStbsv_v2
189
+ #define cublasDtbsv cublasDtbsv_v2
190
+ #define cublasCtbsv cublasCtbsv_v2
191
+ #define cublasZtbsv cublasZtbsv_v2
192
+
193
+ #define cublasSsymv cublasSsymv_v2
194
+ #define cublasDsymv cublasDsymv_v2
195
+ #define cublasCsymv cublasCsymv_v2
196
+ #define cublasZsymv cublasZsymv_v2
197
+ #define cublasChemv cublasChemv_v2
198
+ #define cublasZhemv cublasZhemv_v2
199
+
200
+ #define cublasSsbmv cublasSsbmv_v2
201
+ #define cublasDsbmv cublasDsbmv_v2
202
+ #define cublasChbmv cublasChbmv_v2
203
+ #define cublasZhbmv cublasZhbmv_v2
204
+
205
+ #define cublasSspmv cublasSspmv_v2
206
+ #define cublasDspmv cublasDspmv_v2
207
+ #define cublasChpmv cublasChpmv_v2
208
+ #define cublasZhpmv cublasZhpmv_v2
209
+
210
+ #define cublasSger cublasSger_v2
211
+ #define cublasDger cublasDger_v2
212
+ #define cublasCgeru cublasCgeru_v2
213
+ #define cublasCgerc cublasCgerc_v2
214
+ #define cublasZgeru cublasZgeru_v2
215
+ #define cublasZgerc cublasZgerc_v2
216
+
217
+ #define cublasSsyr cublasSsyr_v2
218
+ #define cublasDsyr cublasDsyr_v2
219
+ #define cublasCsyr cublasCsyr_v2
220
+ #define cublasZsyr cublasZsyr_v2
221
+ #define cublasCher cublasCher_v2
222
+ #define cublasZher cublasZher_v2
223
+
224
+ #define cublasSspr cublasSspr_v2
225
+ #define cublasDspr cublasDspr_v2
226
+ #define cublasChpr cublasChpr_v2
227
+ #define cublasZhpr cublasZhpr_v2
228
+
229
+ #define cublasSsyr2 cublasSsyr2_v2
230
+ #define cublasDsyr2 cublasDsyr2_v2
231
+ #define cublasCsyr2 cublasCsyr2_v2
232
+ #define cublasZsyr2 cublasZsyr2_v2
233
+ #define cublasCher2 cublasCher2_v2
234
+ #define cublasZher2 cublasZher2_v2
235
+
236
+ #define cublasSspr2 cublasSspr2_v2
237
+ #define cublasDspr2 cublasDspr2_v2
238
+ #define cublasChpr2 cublasChpr2_v2
239
+ #define cublasZhpr2 cublasZhpr2_v2
240
+
241
+ /* Blas3 Routines */
242
+
243
+ #define cublasSgemm cublasSgemm_v2
244
+ #define cublasDgemm cublasDgemm_v2
245
+ #define cublasCgemm cublasCgemm_v2
246
+ #define cublasZgemm cublasZgemm_v2
247
+
248
+ #define cublasSsyrk cublasSsyrk_v2
249
+ #define cublasDsyrk cublasDsyrk_v2
250
+ #define cublasCsyrk cublasCsyrk_v2
251
+ #define cublasZsyrk cublasZsyrk_v2
252
+ #define cublasCherk cublasCherk_v2
253
+ #define cublasZherk cublasZherk_v2
254
+
255
+ #define cublasSsyr2k cublasSsyr2k_v2
256
+ #define cublasDsyr2k cublasDsyr2k_v2
257
+ #define cublasCsyr2k cublasCsyr2k_v2
258
+ #define cublasZsyr2k cublasZsyr2k_v2
259
+ #define cublasCher2k cublasCher2k_v2
260
+ #define cublasZher2k cublasZher2k_v2
261
+
262
+ #define cublasSsymm cublasSsymm_v2
263
+ #define cublasDsymm cublasDsymm_v2
264
+ #define cublasCsymm cublasCsymm_v2
265
+ #define cublasZsymm cublasZsymm_v2
266
+ #define cublasChemm cublasChemm_v2
267
+ #define cublasZhemm cublasZhemm_v2
268
+
269
+ #define cublasStrsm cublasStrsm_v2
270
+ #define cublasDtrsm cublasDtrsm_v2
271
+ #define cublasCtrsm cublasCtrsm_v2
272
+ #define cublasZtrsm cublasZtrsm_v2
273
+
274
+ #define cublasStrmm cublasStrmm_v2
275
+ #define cublasDtrmm cublasDtrmm_v2
276
+ #define cublasCtrmm cublasCtrmm_v2
277
+ #define cublasZtrmm cublasZtrmm_v2
278
+
279
+ /* 64-bit integer */
280
+
281
+ /* Blas1 Routines */
282
+
283
+ #define cublasSnrm2_64 cublasSnrm2_v2_64
284
+ #define cublasDnrm2_64 cublasDnrm2_v2_64
285
+ #define cublasScnrm2_64 cublasScnrm2_v2_64
286
+ #define cublasDznrm2_64 cublasDznrm2_v2_64
287
+
288
+ #define cublasSdot_64 cublasSdot_v2_64
289
+ #define cublasDdot_64 cublasDdot_v2_64
290
+ #define cublasCdotu_64 cublasCdotu_v2_64
291
+ #define cublasCdotc_64 cublasCdotc_v2_64
292
+ #define cublasZdotu_64 cublasZdotu_v2_64
293
+ #define cublasZdotc_64 cublasZdotc_v2_64
294
+
295
+ #define cublasSscal_64 cublasSscal_v2_64
296
+ #define cublasDscal_64 cublasDscal_v2_64
297
+ #define cublasCscal_64 cublasCscal_v2_64
298
+ #define cublasCsscal_64 cublasCsscal_v2_64
299
+ #define cublasZscal_64 cublasZscal_v2_64
300
+ #define cublasZdscal_64 cublasZdscal_v2_64
301
+
302
+ #define cublasSaxpy_64 cublasSaxpy_v2_64
303
+ #define cublasDaxpy_64 cublasDaxpy_v2_64
304
+ #define cublasCaxpy_64 cublasCaxpy_v2_64
305
+ #define cublasZaxpy_64 cublasZaxpy_v2_64
306
+
307
+ #define cublasScopy_64 cublasScopy_v2_64
308
+ #define cublasDcopy_64 cublasDcopy_v2_64
309
+ #define cublasCcopy_64 cublasCcopy_v2_64
310
+ #define cublasZcopy_64 cublasZcopy_v2_64
311
+
312
+ #define cublasSswap_64 cublasSswap_v2_64
313
+ #define cublasDswap_64 cublasDswap_v2_64
314
+ #define cublasCswap_64 cublasCswap_v2_64
315
+ #define cublasZswap_64 cublasZswap_v2_64
316
+
317
+ #define cublasIsamax_64 cublasIsamax_v2_64
318
+ #define cublasIdamax_64 cublasIdamax_v2_64
319
+ #define cublasIcamax_64 cublasIcamax_v2_64
320
+ #define cublasIzamax_64 cublasIzamax_v2_64
321
+
322
+ #define cublasIsamin_64 cublasIsamin_v2_64
323
+ #define cublasIdamin_64 cublasIdamin_v2_64
324
+ #define cublasIcamin_64 cublasIcamin_v2_64
325
+ #define cublasIzamin_64 cublasIzamin_v2_64
326
+
327
+ #define cublasSasum_64 cublasSasum_v2_64
328
+ #define cublasDasum_64 cublasDasum_v2_64
329
+ #define cublasScasum_64 cublasScasum_v2_64
330
+ #define cublasDzasum_64 cublasDzasum_v2_64
331
+
332
+ #define cublasSrot_64 cublasSrot_v2_64
333
+ #define cublasDrot_64 cublasDrot_v2_64
334
+ #define cublasCrot_64 cublasCrot_v2_64
335
+ #define cublasCsrot_64 cublasCsrot_v2_64
336
+ #define cublasZrot_64 cublasZrot_v2_64
337
+ #define cublasZdrot_64 cublasZdrot_v2_64
338
+
339
+ #define cublasSrotg_64 cublasSrotg_v2_64
340
+ #define cublasDrotg_64 cublasDrotg_v2_64
341
+ #define cublasCrotg_64 cublasCrotg_v2_64
342
+ #define cublasZrotg_64 cublasZrotg_v2_64
343
+
344
+ #define cublasSrotm_64 cublasSrotm_v2_64
345
+ #define cublasDrotm_64 cublasDrotm_v2_64
346
+
347
+ #define cublasSrotmg_64 cublasSrotmg_v2_64
348
+ #define cublasDrotmg_64 cublasDrotmg_v2_64
349
+
350
+ /* Blas2 Routines */
351
+
352
+ #define cublasSgemv_64 cublasSgemv_v2_64
353
+ #define cublasDgemv_64 cublasDgemv_v2_64
354
+ #define cublasCgemv_64 cublasCgemv_v2_64
355
+ #define cublasZgemv_64 cublasZgemv_v2_64
356
+
357
+ #define cublasSgbmv_64 cublasSgbmv_v2_64
358
+ #define cublasDgbmv_64 cublasDgbmv_v2_64
359
+ #define cublasCgbmv_64 cublasCgbmv_v2_64
360
+ #define cublasZgbmv_64 cublasZgbmv_v2_64
361
+
362
+ #define cublasStrmv_64 cublasStrmv_v2_64
363
+ #define cublasDtrmv_64 cublasDtrmv_v2_64
364
+ #define cublasCtrmv_64 cublasCtrmv_v2_64
365
+ #define cublasZtrmv_64 cublasZtrmv_v2_64
366
+
367
+ #define cublasStbmv_64 cublasStbmv_v2_64
368
+ #define cublasDtbmv_64 cublasDtbmv_v2_64
369
+ #define cublasCtbmv_64 cublasCtbmv_v2_64
370
+ #define cublasZtbmv_64 cublasZtbmv_v2_64
371
+
372
+ #define cublasStpmv_64 cublasStpmv_v2_64
373
+ #define cublasDtpmv_64 cublasDtpmv_v2_64
374
+ #define cublasCtpmv_64 cublasCtpmv_v2_64
375
+ #define cublasZtpmv_64 cublasZtpmv_v2_64
376
+
377
+ #define cublasStrsv_64 cublasStrsv_v2_64
378
+ #define cublasDtrsv_64 cublasDtrsv_v2_64
379
+ #define cublasCtrsv_64 cublasCtrsv_v2_64
380
+ #define cublasZtrsv_64 cublasZtrsv_v2_64
381
+
382
+ #define cublasStpsv_64 cublasStpsv_v2_64
383
+ #define cublasDtpsv_64 cublasDtpsv_v2_64
384
+ #define cublasCtpsv_64 cublasCtpsv_v2_64
385
+ #define cublasZtpsv_64 cublasZtpsv_v2_64
386
+
387
+ #define cublasStbsv_64 cublasStbsv_v2_64
388
+ #define cublasDtbsv_64 cublasDtbsv_v2_64
389
+ #define cublasCtbsv_64 cublasCtbsv_v2_64
390
+ #define cublasZtbsv_64 cublasZtbsv_v2_64
391
+
392
+ #define cublasSsymv_64 cublasSsymv_v2_64
393
+ #define cublasDsymv_64 cublasDsymv_v2_64
394
+ #define cublasCsymv_64 cublasCsymv_v2_64
395
+ #define cublasZsymv_64 cublasZsymv_v2_64
396
+ #define cublasChemv_64 cublasChemv_v2_64
397
+ #define cublasZhemv_64 cublasZhemv_v2_64
398
+
399
+ #define cublasSsbmv_64 cublasSsbmv_v2_64
400
+ #define cublasDsbmv_64 cublasDsbmv_v2_64
401
+ #define cublasChbmv_64 cublasChbmv_v2_64
402
+ #define cublasZhbmv_64 cublasZhbmv_v2_64
403
+
404
+ #define cublasSspmv_64 cublasSspmv_v2_64
405
+ #define cublasDspmv_64 cublasDspmv_v2_64
406
+ #define cublasChpmv_64 cublasChpmv_v2_64
407
+ #define cublasZhpmv_64 cublasZhpmv_v2_64
408
+
409
+ #define cublasSger_64 cublasSger_v2_64
410
+ #define cublasDger_64 cublasDger_v2_64
411
+ #define cublasCgeru_64 cublasCgeru_v2_64
412
+ #define cublasCgerc_64 cublasCgerc_v2_64
413
+ #define cublasZgeru_64 cublasZgeru_v2_64
414
+ #define cublasZgerc_64 cublasZgerc_v2_64
415
+
416
+ #define cublasSsyr_64 cublasSsyr_v2_64
417
+ #define cublasDsyr_64 cublasDsyr_v2_64
418
+ #define cublasCsyr_64 cublasCsyr_v2_64
419
+ #define cublasZsyr_64 cublasZsyr_v2_64
420
+ #define cublasCher_64 cublasCher_v2_64
421
+ #define cublasZher_64 cublasZher_v2_64
422
+
423
+ #define cublasSspr_64 cublasSspr_v2_64
424
+ #define cublasDspr_64 cublasDspr_v2_64
425
+ #define cublasChpr_64 cublasChpr_v2_64
426
+ #define cublasZhpr_64 cublasZhpr_v2_64
427
+
428
+ #define cublasSsyr2_64 cublasSsyr2_v2_64
429
+ #define cublasDsyr2_64 cublasDsyr2_v2_64
430
+ #define cublasCsyr2_64 cublasCsyr2_v2_64
431
+ #define cublasZsyr2_64 cublasZsyr2_v2_64
432
+ #define cublasCher2_64 cublasCher2_v2_64
433
+ #define cublasZher2_64 cublasZher2_v2_64
434
+
435
+ #define cublasSspr2_64 cublasSspr2_v2_64
436
+ #define cublasDspr2_64 cublasDspr2_v2_64
437
+ #define cublasChpr2_64 cublasChpr2_v2_64
438
+ #define cublasZhpr2_64 cublasZhpr2_v2_64
439
+
440
+ /* Blas3 Routines */
441
+
442
+ #define cublasSgemm_64 cublasSgemm_v2_64
443
+ #define cublasDgemm_64 cublasDgemm_v2_64
444
+ #define cublasCgemm_64 cublasCgemm_v2_64
445
+ #define cublasZgemm_64 cublasZgemm_v2_64
446
+
447
+ #define cublasSsyrk_64 cublasSsyrk_v2_64
448
+ #define cublasDsyrk_64 cublasDsyrk_v2_64
449
+ #define cublasCsyrk_64 cublasCsyrk_v2_64
450
+ #define cublasZsyrk_64 cublasZsyrk_v2_64
451
+ #define cublasCherk_64 cublasCherk_v2_64
452
+ #define cublasZherk_64 cublasZherk_v2_64
453
+
454
+ #define cublasSsyr2k_64 cublasSsyr2k_v2_64
455
+ #define cublasDsyr2k_64 cublasDsyr2k_v2_64
456
+ #define cublasCsyr2k_64 cublasCsyr2k_v2_64
457
+ #define cublasZsyr2k_64 cublasZsyr2k_v2_64
458
+ #define cublasCher2k_64 cublasCher2k_v2_64
459
+ #define cublasZher2k_64 cublasZher2k_v2_64
460
+
461
+ #define cublasSsymm_64 cublasSsymm_v2_64
462
+ #define cublasDsymm_64 cublasDsymm_v2_64
463
+ #define cublasCsymm_64 cublasCsymm_v2_64
464
+ #define cublasZsymm_64 cublasZsymm_v2_64
465
+ #define cublasChemm_64 cublasChemm_v2_64
466
+ #define cublasZhemm_64 cublasZhemm_v2_64
467
+
468
+ #define cublasStrsm_64 cublasStrsm_v2_64
469
+ #define cublasDtrsm_64 cublasDtrsm_v2_64
470
+ #define cublasCtrsm_64 cublasCtrsm_v2_64
471
+ #define cublasZtrsm_64 cublasZtrsm_v2_64
472
+
473
+ #define cublasStrmm_64 cublasStrmm_v2_64
474
+ #define cublasDtrmm_64 cublasDtrmm_v2_64
475
+ #define cublasCtrmm_64 cublasCtrmm_v2_64
476
+ #define cublasZtrmm_64 cublasZtrmm_v2_64
477
+
478
+ #endif /* !defined(CUBLAS_V2_H_) */
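For context, the macros above mean that source code written against cublas_v2.h keeps using the short names while the compiled program references the _v2 entry points. A minimal sketch, using only names defined in this header and in cublas_api.h:

    #include <cublas_v2.h>

    /* Sketch: the short names resolve to the _v2 symbols at compile time. */
    static cublasStatus_t v2_handle_example(void) {
      cublasHandle_t handle;
      cublasStatus_t st = cublasCreate(&handle);   /* expands to cublasCreate_v2(&handle) */
      if (st != CUBLAS_STATUS_SUCCESS) return st;
      /* ... e.g. cublasSgemm(handle, ...) expands to cublasSgemm_v2(handle, ...) ... */
      return cublasDestroy(handle);                /* expands to cublasDestroy_v2(handle) */
    }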
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/include/nvblas.h ADDED
@@ -0,0 +1,824 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(NVBLAS_H_)
51
+ #define NVBLAS_H_
52
+
53
+ #include "driver_types.h"
54
+ #include "cuComplex.h" /* import complex data type */
55
+
56
+ #if defined(__cplusplus)
57
+ extern "C" {
58
+ #endif
59
+
60
+ /* GEMM */
61
+ void sgemm_(const char* transa,
62
+ const char* transb,
63
+ const int* m,
64
+ const int* n,
65
+ const int* k,
66
+ const float* alpha,
67
+ const float* a,
68
+ const int* lda,
69
+ const float* b,
70
+ const int* ldb,
71
+ const float* beta,
72
+ float* c,
73
+ const int* ldc);
74
+
75
+ void dgemm_(const char* transa,
76
+ const char* transb,
77
+ const int* m,
78
+ const int* n,
79
+ const int* k,
80
+ const double* alpha,
81
+ const double* a,
82
+ const int* lda,
83
+ const double* b,
84
+ const int* ldb,
85
+ const double* beta,
86
+ double* c,
87
+ const int* ldc);
88
+
89
+ void cgemm_(const char* transa,
90
+ const char* transb,
91
+ const int* m,
92
+ const int* n,
93
+ const int* k,
94
+ const cuComplex* alpha,
95
+ const cuComplex* a,
96
+ const int* lda,
97
+ const cuComplex* b,
98
+ const int* ldb,
99
+ const cuComplex* beta,
100
+ cuComplex* c,
101
+ const int* ldc);
102
+
103
+ void zgemm_(const char* transa,
104
+ const char* transb,
105
+ const int* m,
106
+ const int* n,
107
+ const int* k,
108
+ const cuDoubleComplex* alpha,
109
+ const cuDoubleComplex* a,
110
+ const int* lda,
111
+ const cuDoubleComplex* b,
112
+ const int* ldb,
113
+ const cuDoubleComplex* beta,
114
+ cuDoubleComplex* c,
115
+ const int* ldc);
116
+
117
+ void sgemm(const char* transa,
118
+ const char* transb,
119
+ const int* m,
120
+ const int* n,
121
+ const int* k,
122
+ const float* alpha,
123
+ const float* a,
124
+ const int* lda,
125
+ const float* b,
126
+ const int* ldb,
127
+ const float* beta,
128
+ float* c,
129
+ const int* ldc);
130
+
131
+ void dgemm(const char* transa,
132
+ const char* transb,
133
+ const int* m,
134
+ const int* n,
135
+ const int* k,
136
+ const double* alpha,
137
+ const double* a,
138
+ const int* lda,
139
+ const double* b,
140
+ const int* ldb,
141
+ const double* beta,
142
+ double* c,
143
+ const int* ldc);
144
+
145
+ void cgemm(const char* transa,
146
+ const char* transb,
147
+ const int* m,
148
+ const int* n,
149
+ const int* k,
150
+ const cuComplex* alpha,
151
+ const cuComplex* a,
152
+ const int* lda,
153
+ const cuComplex* b,
154
+ const int* ldb,
155
+ const cuComplex* beta,
156
+ cuComplex* c,
157
+ const int* ldc);
158
+
159
+ void zgemm(const char* transa,
160
+ const char* transb,
161
+ const int* m,
162
+ const int* n,
163
+ const int* k,
164
+ const cuDoubleComplex* alpha,
165
+ const cuDoubleComplex* a,
166
+ const int* lda,
167
+ const cuDoubleComplex* b,
168
+ const int* ldb,
169
+ const cuDoubleComplex* beta,
170
+ cuDoubleComplex* c,
171
+ const int* ldc);
172
+
173
+ /* SYRK */
174
+ void ssyrk_(const char* uplo,
175
+ const char* trans,
176
+ const int* n,
177
+ const int* k,
178
+ const float* alpha,
179
+ const float* a,
180
+ const int* lda,
181
+ const float* beta,
182
+ float* c,
183
+ const int* ldc);
184
+
185
+ void dsyrk_(const char* uplo,
186
+ const char* trans,
187
+ const int* n,
188
+ const int* k,
189
+ const double* alpha,
190
+ const double* a,
191
+ const int* lda,
192
+ const double* beta,
193
+ double* c,
194
+ const int* ldc);
195
+
196
+ void csyrk_(const char* uplo,
197
+ const char* trans,
198
+ const int* n,
199
+ const int* k,
200
+ const cuComplex* alpha,
201
+ const cuComplex* a,
202
+ const int* lda,
203
+ const cuComplex* beta,
204
+ cuComplex* c,
205
+ const int* ldc);
206
+
207
+ void zsyrk_(const char* uplo,
208
+ const char* trans,
209
+ const int* n,
210
+ const int* k,
211
+ const cuDoubleComplex* alpha,
212
+ const cuDoubleComplex* a,
213
+ const int* lda,
214
+ const cuDoubleComplex* beta,
215
+ cuDoubleComplex* c,
216
+ const int* ldc);
217
+
218
+ void ssyrk(const char* uplo,
219
+ const char* trans,
220
+ const int* n,
221
+ const int* k,
222
+ const float* alpha,
223
+ const float* a,
224
+ const int* lda,
225
+ const float* beta,
226
+ float* c,
227
+ const int* ldc);
228
+
229
+ void dsyrk(const char* uplo,
230
+ const char* trans,
231
+ const int* n,
232
+ const int* k,
233
+ const double* alpha,
234
+ const double* a,
235
+ const int* lda,
236
+ const double* beta,
237
+ double* c,
238
+ const int* ldc);
239
+
240
+ void csyrk(const char* uplo,
241
+ const char* trans,
242
+ const int* n,
243
+ const int* k,
244
+ const cuComplex* alpha,
245
+ const cuComplex* a,
246
+ const int* lda,
247
+ const cuComplex* beta,
248
+ cuComplex* c,
249
+ const int* ldc);
250
+
251
+ void zsyrk(const char* uplo,
252
+ const char* trans,
253
+ const int* n,
254
+ const int* k,
255
+ const cuDoubleComplex* alpha,
256
+ const cuDoubleComplex* a,
257
+ const int* lda,
258
+ const cuDoubleComplex* beta,
259
+ cuDoubleComplex* c,
260
+ const int* ldc);
261
+
262
+ /* HERK */
263
+ void cherk_(const char* uplo,
264
+ const char* trans,
265
+ const int* n,
266
+ const int* k,
267
+ const float* alpha,
268
+ const cuComplex* a,
269
+ const int* lda,
270
+ const float* beta,
271
+ cuComplex* c,
272
+ const int* ldc);
273
+
274
+ void zherk_(const char* uplo,
275
+ const char* trans,
276
+ const int* n,
277
+ const int* k,
278
+ const double* alpha,
279
+ const cuDoubleComplex* a,
280
+ const int* lda,
281
+ const double* beta,
282
+ cuDoubleComplex* c,
283
+ const int* ldc);
284
+
285
+ void cherk(const char* uplo,
286
+ const char* trans,
287
+ const int* n,
288
+ const int* k,
289
+ const float* alpha,
290
+ const cuComplex* a,
291
+ const int* lda,
292
+ const float* beta,
293
+ cuComplex* c,
294
+ const int* ldc);
295
+
296
+ void zherk(const char* uplo,
297
+ const char* trans,
298
+ const int* n,
299
+ const int* k,
300
+ const double* alpha,
301
+ const cuDoubleComplex* a,
302
+ const int* lda,
303
+ const double* beta,
304
+ cuDoubleComplex* c,
305
+ const int* ldc);
306
+
307
+ /* TRSM */
308
+ void strsm_(const char* side,
309
+ const char* uplo,
310
+ const char* transa,
311
+ const char* diag,
312
+ const int* m,
313
+ const int* n,
314
+ const float* alpha,
315
+ const float* a,
316
+ const int* lda,
317
+ float* b,
318
+ const int* ldb);
319
+
320
+ void dtrsm_(const char* side,
321
+ const char* uplo,
322
+ const char* transa,
323
+ const char* diag,
324
+ const int* m,
325
+ const int* n,
326
+ const double* alpha,
327
+ const double* a,
328
+ const int* lda,
329
+ double* b,
330
+ const int* ldb);
331
+
332
+ void ctrsm_(const char* side,
333
+ const char* uplo,
334
+ const char* transa,
335
+ const char* diag,
336
+ const int* m,
337
+ const int* n,
338
+ const cuComplex* alpha,
339
+ const cuComplex* a,
340
+ const int* lda,
341
+ cuComplex* b,
342
+ const int* ldb);
343
+
344
+ void ztrsm_(const char* side,
345
+ const char* uplo,
346
+ const char* transa,
347
+ const char* diag,
348
+ const int* m,
349
+ const int* n,
350
+ const cuDoubleComplex* alpha,
351
+ const cuDoubleComplex* a,
352
+ const int* lda,
353
+ cuDoubleComplex* b,
354
+ const int* ldb);
355
+
356
+ void strsm(const char* side,
357
+ const char* uplo,
358
+ const char* transa,
359
+ const char* diag,
360
+ const int* m,
361
+ const int* n,
362
+ const float* alpha,
363
+ const float* a,
364
+ const int* lda,
365
+ float* b,
366
+ const int* ldb);
367
+
368
+ void dtrsm(const char* side,
369
+ const char* uplo,
370
+ const char* transa,
371
+ const char* diag,
372
+ const int* m,
373
+ const int* n,
374
+ const double* alpha,
375
+ const double* a,
376
+ const int* lda,
377
+ double* b,
378
+ const int* ldb);
379
+
380
+ void ctrsm(const char* side,
381
+ const char* uplo,
382
+ const char* transa,
383
+ const char* diag,
384
+ const int* m,
385
+ const int* n,
386
+ const cuComplex* alpha,
387
+ const cuComplex* a,
388
+ const int* lda,
389
+ cuComplex* b,
390
+ const int* ldb);
391
+
392
+ void ztrsm(const char* side,
393
+ const char* uplo,
394
+ const char* transa,
395
+ const char* diag,
396
+ const int* m,
397
+ const int* n,
398
+ const cuDoubleComplex* alpha,
399
+ const cuDoubleComplex* a,
400
+ const int* lda,
401
+ cuDoubleComplex* b,
402
+ const int* ldb);
403
+
404
+ /* SYMM */
405
+ void ssymm_(const char* side,
406
+ const char* uplo,
407
+ const int* m,
408
+ const int* n,
409
+ const float* alpha,
410
+ const float* a,
411
+ const int* lda,
412
+ const float* b,
413
+ const int* ldb,
414
+ const float* beta,
415
+ float* c,
416
+ const int* ldc);
417
+
418
+ void dsymm_(const char* side,
419
+ const char* uplo,
420
+ const int* m,
421
+ const int* n,
422
+ const double* alpha,
423
+ const double* a,
424
+ const int* lda,
425
+ const double* b,
426
+ const int* ldb,
427
+ const double* beta,
428
+ double* c,
429
+ const int* ldc);
430
+
431
+ void csymm_(const char* side,
432
+ const char* uplo,
433
+ const int* m,
434
+ const int* n,
435
+ const cuComplex* alpha,
436
+ const cuComplex* a,
437
+ const int* lda,
438
+ const cuComplex* b,
439
+ const int* ldb,
440
+ const cuComplex* beta,
441
+ cuComplex* c,
442
+ const int* ldc);
443
+
444
+ void zsymm_(const char* side,
445
+ const char* uplo,
446
+ const int* m,
447
+ const int* n,
448
+ const cuDoubleComplex* alpha,
449
+ const cuDoubleComplex* a,
450
+ const int* lda,
451
+ const cuDoubleComplex* b,
452
+ const int* ldb,
453
+ const cuDoubleComplex* beta,
454
+ cuDoubleComplex* c,
455
+ const int* ldc);
456
+
457
+ void ssymm(const char* side,
458
+ const char* uplo,
459
+ const int* m,
460
+ const int* n,
461
+ const float* alpha,
462
+ const float* a,
463
+ const int* lda,
464
+ const float* b,
465
+ const int* ldb,
466
+ const float* beta,
467
+ float* c,
468
+ const int* ldc);
469
+
470
+ void dsymm(const char* side,
471
+ const char* uplo,
472
+ const int* m,
473
+ const int* n,
474
+ const double* alpha,
475
+ const double* a,
476
+ const int* lda,
477
+ const double* b,
478
+ const int* ldb,
479
+ const double* beta,
480
+ double* c,
481
+ const int* ldc);
482
+
483
+ void csymm(const char* side,
484
+ const char* uplo,
485
+ const int* m,
486
+ const int* n,
487
+ const cuComplex* alpha,
488
+ const cuComplex* a,
489
+ const int* lda,
490
+ const cuComplex* b,
491
+ const int* ldb,
492
+ const cuComplex* beta,
493
+ cuComplex* c,
494
+ const int* ldc);
495
+
496
+ void zsymm(const char* side,
497
+ const char* uplo,
498
+ const int* m,
499
+ const int* n,
500
+ const cuDoubleComplex* alpha,
501
+ const cuDoubleComplex* a,
502
+ const int* lda,
503
+ const cuDoubleComplex* b,
504
+ const int* ldb,
505
+ const cuDoubleComplex* beta,
506
+ cuDoubleComplex* c,
507
+ const int* ldc);
508
+
509
+ /* HEMM */
510
+ void chemm_(const char* side,
511
+ const char* uplo,
512
+ const int* m,
513
+ const int* n,
514
+ const cuComplex* alpha,
515
+ const cuComplex* a,
516
+ const int* lda,
517
+ const cuComplex* b,
518
+ const int* ldb,
519
+ const cuComplex* beta,
520
+ cuComplex* c,
521
+ const int* ldc);
522
+
523
+ void zhemm_(const char* side,
524
+ const char* uplo,
525
+ const int* m,
526
+ const int* n,
527
+ const cuDoubleComplex* alpha,
528
+ const cuDoubleComplex* a,
529
+ const int* lda,
530
+ const cuDoubleComplex* b,
531
+ const int* ldb,
532
+ const cuDoubleComplex* beta,
533
+ cuDoubleComplex* c,
534
+ const int* ldc);
535
+
536
+ /* HEMM with no underscore */
537
+ void chemm(const char* side,
538
+ const char* uplo,
539
+ const int* m,
540
+ const int* n,
541
+ const cuComplex* alpha,
542
+ const cuComplex* a,
543
+ const int* lda,
544
+ const cuComplex* b,
545
+ const int* ldb,
546
+ const cuComplex* beta,
547
+ cuComplex* c,
548
+ const int* ldc);
549
+
550
+ void zhemm(const char* side,
551
+ const char* uplo,
552
+ const int* m,
553
+ const int* n,
554
+ const cuDoubleComplex* alpha,
555
+ const cuDoubleComplex* a,
556
+ const int* lda,
557
+ const cuDoubleComplex* b,
558
+ const int* ldb,
559
+ const cuDoubleComplex* beta,
560
+ cuDoubleComplex* c,
561
+ const int* ldc);
562
+
563
+ /* SYR2K */
564
+ void ssyr2k_(const char* uplo,
565
+ const char* trans,
566
+ const int* n,
567
+ const int* k,
568
+ const float* alpha,
569
+ const float* a,
570
+ const int* lda,
571
+ const float* b,
572
+ const int* ldb,
573
+ const float* beta,
574
+ float* c,
575
+ const int* ldc);
576
+
577
+ void dsyr2k_(const char* uplo,
578
+ const char* trans,
579
+ const int* n,
580
+ const int* k,
581
+ const double* alpha,
582
+ const double* a,
583
+ const int* lda,
584
+ const double* b,
585
+ const int* ldb,
586
+ const double* beta,
587
+ double* c,
588
+ const int* ldc);
589
+
590
+ void csyr2k_(const char* uplo,
591
+ const char* trans,
592
+ const int* n,
593
+ const int* k,
594
+ const cuComplex* alpha,
595
+ const cuComplex* a,
596
+ const int* lda,
597
+ const cuComplex* b,
598
+ const int* ldb,
599
+ const cuComplex* beta,
600
+ cuComplex* c,
601
+ const int* ldc);
602
+
603
+ void zsyr2k_(const char* uplo,
604
+ const char* trans,
605
+ const int* n,
606
+ const int* k,
607
+ const cuDoubleComplex* alpha,
608
+ const cuDoubleComplex* a,
609
+ const int* lda,
610
+ const cuDoubleComplex* b,
611
+ const int* ldb,
612
+ const cuDoubleComplex* beta,
613
+ cuDoubleComplex* c,
614
+ const int* ldc);
615
+
616
+ /* SYR2K with no underscore */
617
+ void ssyr2k(const char* uplo,
618
+ const char* trans,
619
+ const int* n,
620
+ const int* k,
621
+ const float* alpha,
622
+ const float* a,
623
+ const int* lda,
624
+ const float* b,
625
+ const int* ldb,
626
+ const float* beta,
627
+ float* c,
628
+ const int* ldc);
629
+
630
+ void dsyr2k(const char* uplo,
631
+ const char* trans,
632
+ const int* n,
633
+ const int* k,
634
+ const double* alpha,
635
+ const double* a,
636
+ const int* lda,
637
+ const double* b,
638
+ const int* ldb,
639
+ const double* beta,
640
+ double* c,
641
+ const int* ldc);
642
+
643
+ void csyr2k(const char* uplo,
644
+ const char* trans,
645
+ const int* n,
646
+ const int* k,
647
+ const cuComplex* alpha,
648
+ const cuComplex* a,
649
+ const int* lda,
650
+ const cuComplex* b,
651
+ const int* ldb,
652
+ const cuComplex* beta,
653
+ cuComplex* c,
654
+ const int* ldc);
655
+
656
+ void zsyr2k(const char* uplo,
657
+ const char* trans,
658
+ const int* n,
659
+ const int* k,
660
+ const cuDoubleComplex* alpha,
661
+ const cuDoubleComplex* a,
662
+ const int* lda,
663
+ const cuDoubleComplex* b,
664
+ const int* ldb,
665
+ const cuDoubleComplex* beta,
666
+ cuDoubleComplex* c,
667
+ const int* ldc);
668
+
669
+ /* HER2K */
670
+ void cher2k_(const char* uplo,
671
+ const char* trans,
672
+ const int* n,
673
+ const int* k,
674
+ const cuComplex* alpha,
675
+ const cuComplex* a,
676
+ const int* lda,
677
+ const cuComplex* b,
678
+ const int* ldb,
679
+ const float* beta,
680
+ cuComplex* c,
681
+ const int* ldc);
682
+
683
+ void zher2k_(const char* uplo,
684
+ const char* trans,
685
+ const int* n,
686
+ const int* k,
687
+ const cuDoubleComplex* alpha,
688
+ const cuDoubleComplex* a,
689
+ const int* lda,
690
+ const cuDoubleComplex* b,
691
+ const int* ldb,
692
+ const double* beta,
693
+ cuDoubleComplex* c,
694
+ const int* ldc);
695
+
696
+ /* HER2K with no underscore */
697
+ void cher2k(const char* uplo,
698
+ const char* trans,
699
+ const int* n,
700
+ const int* k,
701
+ const cuComplex* alpha,
702
+ const cuComplex* a,
703
+ const int* lda,
704
+ const cuComplex* b,
705
+ const int* ldb,
706
+ const float* beta,
707
+ cuComplex* c,
708
+ const int* ldc);
709
+
710
+ void zher2k(const char* uplo,
711
+ const char* trans,
712
+ const int* n,
713
+ const int* k,
714
+ const cuDoubleComplex* alpha,
715
+ const cuDoubleComplex* a,
716
+ const int* lda,
717
+ const cuDoubleComplex* b,
718
+ const int* ldb,
719
+ const double* beta,
720
+ cuDoubleComplex* c,
721
+ const int* ldc);
722
+
723
+ /* TRMM */
724
+ void strmm_(const char* side,
725
+ const char* uplo,
726
+ const char* transa,
727
+ const char* diag,
728
+ const int* m,
729
+ const int* n,
730
+ const float* alpha,
731
+ const float* a,
732
+ const int* lda,
733
+ float* b,
734
+ const int* ldb);
735
+
736
+ void dtrmm_(const char* side,
737
+ const char* uplo,
738
+ const char* transa,
739
+ const char* diag,
740
+ const int* m,
741
+ const int* n,
742
+ const double* alpha,
743
+ const double* a,
744
+ const int* lda,
745
+ double* b,
746
+ const int* ldb);
747
+
748
+ void ctrmm_(const char* side,
749
+ const char* uplo,
750
+ const char* transa,
751
+ const char* diag,
752
+ const int* m,
753
+ const int* n,
754
+ const cuComplex* alpha,
755
+ const cuComplex* a,
756
+ const int* lda,
757
+ cuComplex* b,
758
+ const int* ldb);
759
+
760
+ void ztrmm_(const char* side,
761
+ const char* uplo,
762
+ const char* transa,
763
+ const char* diag,
764
+ const int* m,
765
+ const int* n,
766
+ const cuDoubleComplex* alpha,
767
+ const cuDoubleComplex* a,
768
+ const int* lda,
769
+ cuDoubleComplex* b,
770
+ const int* ldb);
771
+
772
+ void strmm(const char* side,
773
+ const char* uplo,
774
+ const char* transa,
775
+ const char* diag,
776
+ const int* m,
777
+ const int* n,
778
+ const float* alpha,
779
+ const float* a,
780
+ const int* lda,
781
+ float* b,
782
+ const int* ldb);
783
+
784
+ void dtrmm(const char* side,
785
+ const char* uplo,
786
+ const char* transa,
787
+ const char* diag,
788
+ const int* m,
789
+ const int* n,
790
+ const double* alpha,
791
+ const double* a,
792
+ const int* lda,
793
+ double* b,
794
+ const int* ldb);
795
+
796
+ void ctrmm(const char* side,
797
+ const char* uplo,
798
+ const char* transa,
799
+ const char* diag,
800
+ const int* m,
801
+ const int* n,
802
+ const cuComplex* alpha,
803
+ const cuComplex* a,
804
+ const int* lda,
805
+ cuComplex* b,
806
+ const int* ldb);
807
+
808
+ void ztrmm(const char* side,
809
+ const char* uplo,
810
+ const char* transa,
811
+ const char* diag,
812
+ const int* m,
813
+ const int* n,
814
+ const cuDoubleComplex* alpha,
815
+ const cuDoubleComplex* a,
816
+ const int* lda,
817
+ cuDoubleComplex* b,
818
+ const int* ldb);
819
+
820
+ #if defined(__cplusplus)
821
+ }
822
+ #endif /* __cplusplus */
823
+
824
+ #endif /* !defined(NVBLAS_H_) */
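The prototypes above follow the classic Fortran BLAS calling convention: every argument, including scalars and dimensions, is passed by pointer, and matrices are stored column-major. Below is a minimal host-side sketch of calling the dgemm_ entry point; the 2x2 operands are illustrative only, and it assumes a BLAS implementation (for example one preloaded through NVBLAS) is linked in.

#include <stdio.h>

extern "C" void dgemm_(const char* transa, const char* transb,
                       const int* m, const int* n, const int* k,
                       const double* alpha, const double* a, const int* lda,
                       const double* b, const int* ldb,
                       const double* beta, double* c, const int* ldc);

int main(void) {
    const int m = 2, n = 2, k = 2;
    const double alpha = 1.0, beta = 0.0;
    /* Column-major storage: element (i,j) lives at index i + j*ld. */
    double a[4] = {1.0, 3.0, 2.0, 4.0};  /* [[1,2],[3,4]] */
    double b[4] = {5.0, 7.0, 6.0, 8.0};  /* [[5,6],[7,8]] */
    double c[4] = {0.0, 0.0, 0.0, 0.0};

    /* Every argument is passed by pointer, matching the prototype above. */
    dgemm_("N", "N", &m, &n, &k, &alpha, a, &m, b, &k, &beta, c, &m);

    /* Expected result: C = A*B = [[19,22],[43,50]]. */
    printf("C = [[%g, %g], [%g, %g]]\n", c[0], c[2], c[1], c[3]);
    return 0;
}

With NVBLAS preloaded (for example via LD_PRELOAD of libnvblas.so and an nvblas.conf pointing at a CPU BLAS), calls like this are intercepted and sufficiently large problems are routed to cuBLAS on the GPU, while small ones fall back to the configured CPU BLAS.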
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
env-llmeval/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 ADDED
Binary file (737 kB). View file
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes). View file
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h ADDED
@@ -0,0 +1,64 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*******************************************************************************
51
+ * *
52
+ * *
53
+ * *
54
+ *******************************************************************************/
55
+
56
+ #include "device_types.h"
57
+ #if !defined(__CUDACC_RTC__)
58
+ #define EXCLUDE_FROM_RTC
59
+ #include "driver_types.h"
60
+ #undef EXCLUDE_FROM_RTC
61
+ #endif /* !__CUDACC_RTC__ */
62
+ #include "surface_types.h"
63
+ #include "texture_types.h"
64
+ #include "vector_types.h"
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h ADDED
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/common_functions.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
65
+ #endif
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h ADDED
@@ -0,0 +1,452 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_ASYNC_H
50
+ #define _CG_ASYNC_H
51
+
52
+ #include "helpers.h"
53
+ #include "info.h"
54
+
55
+ #include <cuda_pipeline.h>
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+ // Groups supported by memcpy_async
61
+ template <class TyGroup>
62
+ struct _async_copy_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _async_copy_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>>
66
+ : public _CG_STL_NAMESPACE::true_type {};
67
+ template <>
68
+ struct _async_copy_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
69
+ template <>
70
+ struct _async_copy_group_supported<cooperative_groups::thread_block> : public _CG_STL_NAMESPACE::true_type {};
71
+
72
+ template <class TyGroup>
73
+ using async_copy_group_supported = _async_copy_group_supported<details::remove_qual<TyGroup>>;
74
+
75
+ // Groups that require optimization
76
+ template <class TyGroup>
77
+ struct _async_copy_optimize_tile : public _CG_STL_NAMESPACE::false_type {};
78
+
79
+ template <typename TyPar>
80
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<1, TyPar>>
81
+ : public _CG_STL_NAMESPACE::false_type {};
82
+
83
+ template <unsigned int Sz, typename TyPar>
84
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<Sz, TyPar>>
85
+ : public _CG_STL_NAMESPACE::true_type {};
86
+
87
+ template <class TyGroup>
88
+ using async_copy_optimize_tile = _async_copy_optimize_tile<details::remove_qual<TyGroup>>;
89
+
90
+ // SFINAE helpers for tile optimizations
91
+ template <class TyGroup>
92
+ using enable_tile_optimization =
93
+ typename _CG_STL_NAMESPACE::enable_if<async_copy_optimize_tile<TyGroup>::value, void *>::type;
94
+
95
+ template <class TyGroup>
96
+ using disable_tile_optimization =
97
+ typename _CG_STL_NAMESPACE::enable_if<!async_copy_optimize_tile<TyGroup>::value, void *>::type;
98
+
99
+ // Segment for punning to aligned types
100
+ template <unsigned int N>
101
+ struct _Segment {
102
+ int _seg[N];
103
+ };
104
+
105
+ // Trivial layout guaranteed-aligned copy-async compatible segments
106
+ template <unsigned int N>
107
+ struct Segment;
108
+ template <>
109
+ struct __align__(4) Segment<1> : public _Segment<1>{};
110
+ template <>
111
+ struct __align__(8) Segment<2> : public _Segment<2>{};
112
+ template <>
113
+ struct __align__(16) Segment<4> : public _Segment<4>{};
114
+
115
+ // Interleaved element by element copies from source to dest
116
+ template <typename TyGroup, typename TyElem>
117
+ _CG_STATIC_QUALIFIER void inline_copy(TyGroup &group, TyElem *__restrict__ dst, const TyElem *__restrict__ src,
118
+ size_t count) {
119
+ const unsigned int rank = group.thread_rank();
120
+ const unsigned int stride = group.size();
121
+
122
+ for (size_t idx = rank; idx < count; idx += stride) {
123
+ dst[idx] = src[idx];
124
+ }
125
+ }
126
+
127
+ template <typename TyGroup, typename TyElem, enable_tile_optimization<TyGroup> = nullptr>
128
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
129
+ const TyElem *__restrict__ src, size_t count) {
130
+ static_assert(async_copy_group_supported<TyGroup>::value,
131
+ "Async copy is only supported for groups that represent private shared memory");
132
+
133
+ if (count == 0) {
134
+ return;
135
+ }
136
+
137
+ const bool dstIsNotShared = !__isShared(dst);
138
+ const bool srcIsNotGlobal = !__isGlobal(src);
139
+
140
+ if (dstIsNotShared || srcIsNotGlobal) {
141
+ inline_copy(group, dst, src, count);
142
+ return;
143
+ }
144
+
145
+ const unsigned int stride = group.size();
146
+ const unsigned int rank = group.thread_rank();
147
+ // Efficient copies require warps to operate on the same amount of work at each step.
148
+ // remainders are handled in a separate stage to prevent branching
149
+ const unsigned int subWarpMask = (stride - 1);
150
+ const unsigned int subwarpCopies = (subWarpMask & (unsigned int)count);
151
+ const unsigned int maxSubwarpRank = min(rank, subwarpCopies - 1);
152
+
153
+ const size_t warpCopies = (count & (~subWarpMask));
154
+
155
+ for (size_t idx = 0; idx < warpCopies; idx += stride) {
156
+ size_t _srcIdx = rank + idx;
157
+ size_t _dstIdx = rank + idx;
158
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
159
+ }
160
+
161
+ if (subwarpCopies) {
162
+ size_t _srcIdx = warpCopies + maxSubwarpRank;
163
+ size_t _dstIdx = warpCopies + maxSubwarpRank;
164
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
165
+ }
166
+ }
167
+
168
+ template <typename TyGroup, typename TyElem, disable_tile_optimization<TyGroup> = nullptr>
169
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
170
+ const TyElem *__restrict__ src, size_t count) {
171
+ static_assert(async_copy_group_supported<TyGroup>::value,
172
+ "Async copy is only supported for groups that represent private shared memory");
173
+
174
+ const bool dstIsNotShared = !__isShared(dst);
175
+ const bool srcIsNotGlobal = !__isGlobal(src);
176
+
177
+ if (dstIsNotShared || srcIsNotGlobal) {
178
+ inline_copy(group, dst, src, count);
179
+ return;
180
+ }
181
+
182
+ unsigned int stride = group.size();
183
+ unsigned int rank = group.thread_rank();
184
+
185
+ for (size_t idx = rank; idx < count; idx += stride) {
186
+ size_t _srcIdx = idx;
187
+ size_t _dstIdx = idx;
188
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
189
+ }
190
+ }
191
+
192
+ // Determine best possible alignment given an input and initial conditions
193
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
194
+ template <unsigned int MinAlignment, unsigned int MaxAlignment>
195
+ _CG_STATIC_QUALIFIER uint32_t find_best_alignment(void *__restrict__ dst, const void *__restrict__ src) {
196
+ // Narrowing conversion intentional
197
+ uint32_t base1 = (uint32_t) reinterpret_cast<uintptr_t>(src);
198
+ uint32_t base2 = (uint32_t) reinterpret_cast<uintptr_t>(dst);
199
+
200
+ uint32_t diff = ((base1) ^ (base2)) & (MaxAlignment - 1);
201
+
202
+ // range [MaxAlignment, alignof(elem)], step: x >> 1
203
+ // over range of possible alignments, choose best available out of range
204
+ uint32_t out = MaxAlignment;
205
+ #pragma unroll
206
+ for (uint32_t alignment = (MaxAlignment >> 1); alignment >= MinAlignment; alignment >>= 1) {
207
+ if (alignment & diff)
208
+ out = alignment;
209
+ }
210
+
211
+ return out;
212
+ }
213
+
214
+ // Determine best possible alignment given an input and initial conditions
215
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
216
+ template <typename TyType, typename TyGroup>
217
+ _CG_STATIC_QUALIFIER void copy_like(const TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
218
+ size_t count) {
219
+ const char *src = reinterpret_cast<const char *>(_src);
220
+ char *dst = reinterpret_cast<char *>(_dst);
221
+
222
+ constexpr uint32_t targetAlignment = (uint32_t)alignof(TyType);
223
+
224
+ uint32_t base = (uint32_t) reinterpret_cast<uintptr_t>(src);
225
+ uint32_t alignOffset = ((~base) + 1) & (targetAlignment - 1);
226
+
227
+ inline_copy(group, dst, src, alignOffset);
228
+ count -= alignOffset;
229
+ src += alignOffset;
230
+ dst += alignOffset;
231
+
232
+ // Copy using the best available alignment, async_copy expects n-datums, not bytes
233
+ size_t asyncCount = count / sizeof(TyType);
234
+ accelerated_async_copy(group, reinterpret_cast<TyType *>(dst), reinterpret_cast<const TyType *>(src), asyncCount);
235
+ asyncCount *= sizeof(TyType);
236
+
237
+ count -= asyncCount;
238
+ src += asyncCount;
239
+ dst += asyncCount;
240
+ inline_copy(group, dst, src, count);
241
+ }
242
+
243
+ // We must determine alignment and manually align src/dst ourselves
244
+ template <size_t AlignHint>
245
+ struct _memcpy_async_align_dispatch {
246
+ template <typename TyGroup>
247
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ dst, const void *__restrict__ src, size_t count) {
248
+ uint32_t alignment = find_best_alignment<AlignHint, 16>(dst, src);
249
+
250
+ // Avoid copying the extra bytes if desired copy count is smaller
251
+ alignment = count < alignment ? AlignHint : alignment;
252
+
253
+ switch (alignment) {
254
+ default:
255
+ case 1:
256
+ inline_copy(group, reinterpret_cast<char *>(dst), reinterpret_cast<const char *>(src), count);
257
+ break;
258
+ case 2:
259
+ inline_copy(group, reinterpret_cast<short *>(dst), reinterpret_cast<const short *>(src), count >> 1);
260
+ break;
261
+ case 4:
262
+ copy_like<Segment<1>>(group, dst, src, count);
263
+ break;
264
+ case 8:
265
+ copy_like<Segment<2>>(group, dst, src, count);
266
+ break;
267
+ case 16:
268
+ copy_like<Segment<4>>(group, dst, src, count);
269
+ break;
270
+ }
271
+ }
272
+ };
273
+
274
+ // Specialization for 4 byte alignments
275
+ template <>
276
+ struct _memcpy_async_align_dispatch<4> {
277
+ template <typename TyGroup>
278
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
279
+ size_t count) {
280
+ const Segment<1> *src = reinterpret_cast<const Segment<1> *>(_src);
281
+ Segment<1> *dst = reinterpret_cast<Segment<1> *>(_dst);
282
+
283
+ // Dispatch straight to aligned LDGSTS calls
284
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
285
+ }
286
+ };
287
+
288
+ // Specialization for 8 byte alignments
289
+ template <>
290
+ struct _memcpy_async_align_dispatch<8> {
291
+ template <typename TyGroup>
292
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
293
+ size_t count) {
294
+ const Segment<2> *src = reinterpret_cast<const Segment<2> *>(_src);
295
+ Segment<2> *dst = reinterpret_cast<Segment<2> *>(_dst);
296
+
297
+ // Dispatch straight to aligned LDGSTS calls
298
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
299
+ }
300
+ };
301
+
302
+ // Alignments over 16 are truncated to 16 and bypass alignment
303
+ // This is the highest performing memcpy available
304
+ template <>
305
+ struct _memcpy_async_align_dispatch<16> {
306
+ template <typename TyGroup>
307
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
308
+ size_t count) {
309
+ const Segment<4> *src = reinterpret_cast<const Segment<4> *>(_src);
310
+ Segment<4> *dst = reinterpret_cast<Segment<4> *>(_dst);
311
+
312
+ // Dispatch straight to aligned LDGSTS calls
313
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
314
+ }
315
+ };
316
+
317
+ // byte-wide API
318
+ template <size_t Alignment, class TyGroup>
319
+ _CG_STATIC_QUALIFIER void _memcpy_async_dispatch_to_aligned_copy(const TyGroup &group, void *__restrict__ _dst,
320
+ const void *__restrict__ _src, size_t count) {
321
+ static_assert(!(Alignment & (Alignment - 1)), "Known static alignment dispatch must be a power of 2");
322
+ details::_memcpy_async_align_dispatch<Alignment>::copy(group, _dst, _src, count);
323
+ }
324
+
325
+ // Internal dispatch APIs
326
+ // These deduce the alignments and sizes necessary to invoke the underlying copy engine
327
+ template <typename Ty>
328
+ using is_void = _CG_STL_NAMESPACE::is_same<Ty, void>;
329
+
330
+ template <typename Ty>
331
+ using enable_if_not_void = typename _CG_STL_NAMESPACE::enable_if<!is_void<Ty>::value, void *>::type;
332
+
333
+ template <typename Ty>
334
+ using enable_if_void = typename _CG_STL_NAMESPACE::enable_if<is_void<Ty>::value, void *>::type;
335
+
336
+ template <typename Ty>
337
+ using enable_if_integral =
338
+ typename _CG_STL_NAMESPACE::enable_if<_CG_STL_NAMESPACE::is_integral<Ty>::value, void *>::type;
339
+
340
+ // byte-wide API using aligned_size_t
341
+ template <class TyGroup, template <size_t> typename Alignment, size_t Hint>
342
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, void *__restrict__ _dst,
343
+ const void *__restrict__ _src, const Alignment<Hint> &count) {
344
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
345
+
346
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, (size_t)count);
347
+ }
348
+
349
+ // byte-wide API using type for alignment
350
+ template <class TyGroup, typename TyElem, typename TySize, size_t Hint = alignof(TyElem),
351
+ enable_if_not_void<TyElem> = nullptr, enable_if_integral<TySize> = nullptr>
352
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
353
+ const TyElem *__restrict__ _src, const TySize& count) {
354
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
355
+
356
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, count);
357
+ }
358
+
359
+ // byte-wide API with full alignment deduction required
360
+ template <class TyGroup, typename TyElem, typename TySize, enable_if_void<TyElem> = nullptr,
361
+ enable_if_integral<TySize> = nullptr>
362
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
363
+ const TyElem *__restrict__ _src, const TySize& count) {
364
+ details::_memcpy_async_dispatch_to_aligned_copy<1>(group, _dst, _src, count);
365
+ }
366
+
367
+ // 1d-datum API
368
+ template <class TyGroup, typename TyElem, size_t Hint = alignof(TyElem)>
369
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const size_t dstCount,
370
+ const TyElem *__restrict__ src, const size_t srcCount) {
371
+ constexpr unsigned int _align = Hint;
372
+ const size_t totalCount = min(dstCount, srcCount) * sizeof(TyElem);
373
+
374
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
375
+ }
376
+
377
+ // 1d-datum API using aligned_size_t
378
+ template <class TyGroup, typename TyElem, template <size_t> typename Alignment, size_t Hint>
379
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const Alignment<Hint> &dstCount,
380
+ const TyElem *__restrict__ src, const Alignment<Hint> &srcCount) {
381
+ constexpr unsigned int _align = Hint;
382
+ const size_t totalCount = min((size_t)dstCount, (size_t)srcCount) * sizeof(TyElem);
383
+
384
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
385
+ }
386
+
387
+ } // namespace details
388
+
389
+ /*
390
+ * Group submit batch of async-copy to cover contiguous 1D array
391
+ * and commit that batch to eventually wait for completion.
392
+ */
393
+ template <class TyGroup, typename TyElem, typename TySizeT>
394
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ _dst, const TyElem *__restrict__ _src,
395
+ const TySizeT &count) {
396
+ details::_memcpy_async_bytes(group, _dst, _src, count);
397
+ __pipeline_commit();
398
+ }
399
+
400
+ /*
401
+ * Group submit batch of async-copy to cover contiguous 1D array
402
+ * and commit that batch to eventually wait for completion.
403
+ * Object counts are in datum sized chunks, not bytes.
404
+ */
405
+ template <class TyGroup, class TyElem, typename DstLayout, typename SrcLayout>
406
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ dst, const DstLayout &dstLayout,
407
+ const TyElem *__restrict__ src, const SrcLayout &srcLayout) {
408
+ details::_memcpy_async_datum(group, dst, dstLayout, src, srcLayout);
409
+ __pipeline_commit();
410
+ }
411
+
412
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
413
+ template <unsigned int Stage, class TyGroup>
414
+ _CG_STATIC_QUALIFIER void wait_prior(const TyGroup &group) {
415
+ __pipeline_wait_prior(Stage);
416
+ group.sync();
417
+ }
418
+
419
+ /* Group wait all previously submitted memcpy_async to complete. */
420
+ template <class TyGroup>
421
+ _CG_STATIC_QUALIFIER void wait(const TyGroup &group) {
422
+ __pipeline_wait_prior(0);
423
+ group.sync();
424
+ }
425
+
426
+ /***************** CG APIs including pipeline are deprecated *****************/
427
+
428
+ /* Group submit batch of async-copy to cover of contiguous 1D array
429
+ to a pipeline and commit the batch*/
430
+ template <class TyGroup, class TyElem>
431
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void memcpy_async(TyGroup &group, TyElem *dst, size_t dstCount, const TyElem *src, size_t srcCount,
432
+ nvcuda::experimental::pipeline &pipe) {
433
+ details::_memcpy_async_datum(group, dst, dstCount, src, srcCount);
434
+ pipe.commit();
435
+ }
436
+
437
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
438
+ template <unsigned int Stage, class TyGroup>
439
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait_prior(TyGroup &group, nvcuda::experimental::pipeline &pipe) {
440
+ pipe.wait_prior<Stage>();
441
+ group.sync();
442
+ }
443
+
444
+ /* Group wait for stage-S of memcpy_async to complete. */
445
+ template <class TyGroup>
446
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait(TyGroup &group, nvcuda::experimental::pipeline &pipe, size_t stage) {
447
+ pipe.wait(stage);
448
+ group.sync();
449
+ }
450
+ _CG_END_NAMESPACE
451
+
452
+ #endif // _CG_ASYNC_H
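The internal dispatch machinery above backs the group-wide memcpy_async/wait entry points exposed through cooperative_groups. A minimal kernel sketch follows, assuming CUDA 11 or newer and a launch with 256 threads per block; the doubling of each element is purely illustrative.

#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>

namespace cg = cooperative_groups;

__global__ void stage_and_scale(const int* __restrict__ in, int* __restrict__ out) {
    __shared__ int tile[256];
    cg::thread_block block = cg::this_thread_block();

    /* All threads in the block cooperatively submit one async copy batch
       (global -> shared), sized in bytes, and the batch is committed. */
    cg::memcpy_async(block, tile, in + blockIdx.x * 256, sizeof(int) * 256);

    /* Wait for all previously submitted batches and sync the group,
       so tile[] is safe to read. */
    cg::wait(block);

    out[blockIdx.x * 256 + threadIdx.x] = 2 * tile[threadIdx.x];
}

When the destination is shared memory and the source is global memory, the copy takes the asynchronous path sketched in accelerated_async_copy above; otherwise it degrades to the plain element-wise inline_copy.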
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h ADDED
@@ -0,0 +1,108 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_REDUCE_H_
50
+ #define _CG_COALESCED_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "coalesced_scan.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto coalesced_reduce_to_one(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ if (group.size() == 32) {
65
+ auto out = val;
66
+ for (int offset = group.size() >> 1; offset > 0; offset >>= 1) {
67
+ out = op(out, group.shfl_up(out, offset));
68
+ }
69
+ return out;
70
+ }
71
+ else {
72
+ auto scan_result =
73
+ inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
74
+ return scan_result;
75
+ }
76
+ }
77
+
78
+ template <typename TyVal, typename TyOp>
79
+ _CG_QUALIFIER auto coalesced_reduce(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
80
+ auto out = coalesced_reduce_to_one(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
81
+ if (group.size() == 32) {
82
+ return group.shfl(out, 31);
83
+ }
84
+ else {
85
+ unsigned int group_mask = _coalesced_group_data_access::get_mask(group);
86
+ unsigned int last_thread_id = 31 - __clz(group_mask);
87
+ return details::tile::shuffle_dispatch<TyVal>::shfl(
88
+ _CG_STL_NAMESPACE::forward<TyVal>(out), group_mask, last_thread_id, 32);
89
+ }
90
+ }
91
+
92
+ template <typename TyVal, typename TyOp, unsigned int TySize, typename ParentT>
93
+ _CG_QUALIFIER auto coalesced_reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group,
94
+ TyVal&& val,
95
+ TyOp&& op) -> decltype(op(val, val)) {
96
+ auto out = val;
97
+ for (int mask = TySize >> 1; mask > 0; mask >>= 1) {
98
+ out = op(out, group.shfl_xor(out, mask));
99
+ }
100
+
101
+ return out;
102
+ }
103
+
104
+ } // details
105
+
106
+ _CG_END_NAMESPACE
107
+
108
+ #endif // _CG_COALESCED_REDUCE_H_
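These reduce helpers sit underneath the public cooperative_groups::reduce API. A minimal warp-level sum is sketched below, assuming CUDA 11 or newer; the indexing scheme and the choice of cg::plus<int> are illustrative.

#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>

namespace cg = cooperative_groups;

__global__ void warp_sums(const int* in, int* out) {
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);

    int v = in[blockIdx.x * blockDim.x + threadIdx.x];

    /* Every lane of the warp receives the warp-wide sum; for a full
       32-thread tile this maps onto the shfl_xor butterfly above. */
    int sum = cg::reduce(warp, v, cg::plus<int>());

    if (warp.thread_rank() == 0) {
        out[blockIdx.x * (blockDim.x / 32) + warp.meta_group_rank()] = sum;
    }
}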
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h ADDED
@@ -0,0 +1,174 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_SCAN_H_
50
+ #define _CG_COALESCED_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "functional.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyGroup, typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ auto out = val;
65
+ for (int mask = 1; mask < group.size(); mask <<= 1) {
66
+ auto tmp = group.shfl_up(out, mask);
67
+ if (mask <= group.thread_rank()) {
68
+ out = op(out, tmp);
69
+ }
70
+ }
71
+
72
+ return out;
73
+ }
74
+
75
+ template <typename TyGroup, typename TyVal, typename TyOp>
76
+ _CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
77
+ const unsigned int groupSize = group.size();
78
+ auto out = val;
79
+
80
+ const unsigned int mask = details::_coalesced_group_data_access::get_mask(group);
81
+ unsigned int lanemask = details::lanemask32_lt() & mask;
82
+ unsigned int srcLane = details::laneid();
83
+
84
+ const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */
85
+ const unsigned int rank = __popc(lanemask);
86
+
87
+ for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) {
88
+ if (i <= rank) {
89
+ srcLane -= j;
90
+ j = i; /* maximum possible lane */
91
+
92
+ unsigned int begLane = base + rank - i; /* minimum possible lane */
93
+
94
+ /* Next source lane is in the range [ begLane .. srcLane ]
95
+ * If begLane < srcLane then do a binary search.
96
+ */
97
+ while (begLane < srcLane) {
98
+ const unsigned int halfLane = (begLane + srcLane) >> 1;
99
+ const unsigned int halfMask = lanemask >> halfLane;
100
+ const unsigned int d = __popc(halfMask);
101
+ if (d < i) {
102
+ srcLane = halfLane - 1; /* halfLane too large */
103
+ }
104
+ else if ((i < d) || !(halfMask & 0x01)) {
105
+ begLane = halfLane + 1; /* halfLane too small */
106
+ }
107
+ else {
108
+ begLane = srcLane = halfLane; /* happen to hit */
109
+ }
110
+ }
111
+ }
112
+
113
+ auto tmp = details::tile::shuffle_dispatch<TyVal>::shfl(out, mask, srcLane, 32);
114
+ if (i <= rank) {
115
+ out = op(out, tmp);
116
+ }
117
+ }
118
+ return out;
119
+ }
120
+
121
+ template <unsigned int TySize, typename ParentT, typename TyVal, typename TyOp>
122
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile<TySize, ParentT>& group,
123
+ TyVal&& val,
124
+ TyOp&& op) -> decltype(op(val, val)) {
125
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
126
+ }
127
+
128
+ template <typename TyVal, typename TyOp>
129
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
130
+ if (group.size() == 32) {
131
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
132
+ }
133
+ else {
134
+ return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
135
+ }
136
+ }
137
+
138
+ template <bool IntegralOptimized>
139
+ struct scan_choose_convertion;
140
+
141
+ template<>
142
+ struct scan_choose_convertion<true> {
143
+ template <typename TyGroup, typename TyRes, typename TyVal>
144
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
145
+ return result - val;
146
+ }
147
+ };
148
+
149
+ template<>
150
+ struct scan_choose_convertion<false> {
151
+ template <typename TyGroup, typename TyRes, typename TyVal>
152
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
153
+ auto ret = group.shfl_up(result, 1);
154
+ if (group.thread_rank() == 0) {
155
+ return {};
156
+ }
157
+ else {
158
+ return ret;
159
+ }
160
+ }
161
+ };
162
+
163
+ template <typename TyGroup, typename TyRes, typename TyVal, typename TyFn>
164
+ _CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
165
+ using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same<remove_qual<TyFn>, cooperative_groups::plus<remove_qual<TyVal>>>::value
166
+ && _CG_STL_NAMESPACE::is_integral<remove_qual<TyVal>>::value>;
167
+ return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward<TyVal>(val));
168
+ }
169
+
170
+ } // details
171
+
172
+ _CG_END_NAMESPACE
173
+
174
+ #endif // _CG_COALESCED_SCAN_H_
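Note: the contiguous path above is a Hillis-Steele style inclusive scan built on shfl_up. A minimal standalone sketch of the same pattern, assuming a full 32-lane warp and an integer plus operator (the function name here is illustrative, not part of the header):

// Illustrative sketch of the shfl_up-based inclusive scan used by
// inclusive_scan_contiguous above; assumes all 32 lanes are active.
__device__ int warp_inclusive_scan(int val) {
    const unsigned int lane = threadIdx.x & 31u;           // lane id within the warp
    for (int offset = 1; offset < 32; offset <<= 1) {
        int up = __shfl_up_sync(0xFFFFFFFFu, val, offset); // value from lane - offset
        if (lane >= offset) {                              // same guard as mask <= thread_rank() above
            val += up;                                     // op fixed to plus<int> in this sketch
        }
    }
    return val;
}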
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h ADDED
@@ -0,0 +1,99 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_DRIVER_API_H
50
+ #define _CG_DRIVER_API_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details {
57
+ template <unsigned int RegId>
58
+ _CG_QUALIFIER unsigned int load_env_reg() {
59
+ // Abort by default
60
+ _CG_ABORT();
61
+ return 0;
62
+ }
63
+
64
+ template <unsigned int HiReg, unsigned int LoReg>
65
+ _CG_QUALIFIER unsigned long long load_env_reg64() {
66
+ unsigned long long registerLo = load_env_reg<LoReg>();
67
+ unsigned long long registerHi = load_env_reg<HiReg>();
68
+
69
+ return (registerHi << 32) | registerLo;
70
+ }
71
+
72
+ // inline PTX for accessing registers requires an immediate for the special reg
73
+ # define LOAD_ENVREG(NUMBER) \
74
+ template <> _CG_QUALIFIER unsigned int load_env_reg<NUMBER>() { \
75
+ unsigned int r; \
76
+ asm ("mov.u32 %0, %%envreg" #NUMBER ";" : "=r"(r)); \
77
+ return r; \
78
+ }
79
+
80
+ // Instantiate loaders for registers used
81
+ LOAD_ENVREG(0);
82
+ LOAD_ENVREG(1);
83
+ LOAD_ENVREG(2);
84
+ # undef LOAD_ENVREG
85
+
86
+ struct grid_workspace {
87
+ unsigned int wsSize;
88
+ unsigned int barrier;
89
+ };
90
+
91
+ _CG_QUALIFIER grid_workspace* get_grid_workspace() {
92
+ unsigned long long gridWsAbiAddress = load_env_reg64<1, 2>();
93
+ // Interpret the address from envreg 1 and 2 as the driver's grid workspace
94
+ return (reinterpret_cast<grid_workspace*>(gridWsAbiAddress));
95
+ }
96
+ }
97
+ _CG_END_NAMESPACE
98
+
99
+ #endif // _CG_DRIVER_API_H
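The 64-bit workspace address above is rebuilt from two 32-bit environment registers; the operands are widened to 64 bits before the shift so the high half is not lost. A trivial illustration of that combine step (a sketch, not part of the header):

// Widen before shifting: shifting a 32-bit value left by 32 would be undefined.
__device__ unsigned long long combine_hi_lo(unsigned int hi, unsigned int lo) {
    return (static_cast<unsigned long long>(hi) << 32) |
            static_cast<unsigned long long>(lo);
}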
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h ADDED
@@ -0,0 +1,212 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_FUNCTIONAL_H
50
+ #define _CG_FUNCTIONAL_H
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ #ifdef _CG_USE_CUDA_STL
57
+ # include <cuda/std/functional>
58
+ #endif
59
+
60
+ _CG_BEGIN_NAMESPACE
61
+
62
+ namespace details {
63
+ #ifdef _CG_USE_CUDA_STL
64
+ using cuda::std::plus;
65
+ using cuda::std::bit_and;
66
+ using cuda::std::bit_xor;
67
+ using cuda::std::bit_or;
68
+ #else
69
+ template <typename Ty> struct plus {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 + arg2;}};
70
+ template <typename Ty> struct bit_and {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 & arg2;}};
71
+ template <typename Ty> struct bit_xor {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 ^ arg2;}};
72
+ template <typename Ty> struct bit_or {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 | arg2;}};
73
+ #endif // _CG_USE_PLATFORM_STL
74
+ } // details
75
+
76
+ template <typename Ty>
77
+ struct plus : public details::plus<Ty> {};
78
+
79
+ template <typename Ty>
80
+ struct less {
81
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
82
+ return (arg2 < arg1) ? arg2 : arg1;
83
+ }
84
+ };
85
+
86
+ template <typename Ty>
87
+ struct greater {
88
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
89
+ return (arg1 < arg2) ? arg2 : arg1;
90
+ }
91
+ };
92
+
93
+ template <typename Ty>
94
+ struct bit_and : public details::bit_and<Ty> {};
95
+
96
+ template <typename Ty>
97
+ struct bit_xor : public details::bit_xor<Ty> {};
98
+
99
+ template <typename Ty>
100
+ struct bit_or : public details::bit_or<Ty> {};
101
+
102
+ #if defined(_CG_HAS_STL_ATOMICS)
103
+ namespace details {
104
+ template <class Ty>
105
+ using _atomic_is_type_supported = _CG_STL_NAMESPACE::integral_constant<bool,
106
+ _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) == 4 || sizeof(Ty) == 8)>;
107
+
108
+ template <typename TyOp> struct _atomic_op_supported : public _CG_STL_NAMESPACE::false_type {};
109
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::plus<Ty>> : public _atomic_is_type_supported<Ty> {};
110
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::less<Ty>> : public _atomic_is_type_supported<Ty> {};
111
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::greater<Ty>> : public _atomic_is_type_supported<Ty> {};
112
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_and<Ty>> : public _atomic_is_type_supported<Ty> {};
113
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_or<Ty>> : public _atomic_is_type_supported<Ty> {};
114
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_xor<Ty>> : public _atomic_is_type_supported<Ty> {};
115
+
116
+ template<typename TyAtomic, typename TyVal, typename TyOp>
117
+ _CG_QUALIFIER remove_qual<TyVal> atomic_cas_fallback(TyAtomic&& atomic, TyVal&& val, TyOp&& op) {
118
+ auto old = atomic.load(cuda::std::memory_order_relaxed);
119
+ while(!atomic.compare_exchange_weak(old, op(old, val), cuda::std::memory_order_relaxed));
120
+ return old;
121
+ }
122
+
123
+ template<typename TyOp>
124
+ struct op_picker;
125
+
126
+ template<typename TyVal>
127
+ struct op_picker<cooperative_groups::plus<TyVal>> {
128
+ template<typename TyAtomic>
129
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
130
+ return atomic.fetch_add(val, cuda::std::memory_order_relaxed);
131
+ }
132
+ };
133
+
134
+ template<typename TyVal>
135
+ struct op_picker<cooperative_groups::less<TyVal>> {
136
+ template<typename TyAtomic>
137
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
138
+ return atomic.fetch_min(val, cuda::std::memory_order_relaxed);
139
+ }
140
+ };
141
+
142
+ template<typename TyVal>
143
+ struct op_picker<cooperative_groups::greater<TyVal>> {
144
+ template<typename TyAtomic>
145
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
146
+ return atomic.fetch_max(val, cuda::std::memory_order_relaxed);
147
+ }
148
+ };
149
+
150
+ template<typename TyVal>
151
+ struct op_picker<cooperative_groups::bit_and<TyVal>> {
152
+ template<typename TyAtomic>
153
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
154
+ return atomic.fetch_and(val, cuda::std::memory_order_relaxed);
155
+ }
156
+ };
157
+
158
+ template<typename TyVal>
159
+ struct op_picker<cooperative_groups::bit_xor<TyVal>> {
160
+ template<typename TyAtomic>
161
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
162
+ return atomic.fetch_xor(val, cuda::std::memory_order_relaxed);
163
+ }
164
+ };
165
+
166
+ template<typename TyVal>
167
+ struct op_picker<cooperative_groups::bit_or<TyVal>> {
168
+ template<typename TyAtomic>
169
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
170
+ return atomic.fetch_or(val, cuda::std::memory_order_relaxed);
171
+ }
172
+ };
173
+
174
+ template<bool atomic_supported>
175
+ struct atomic_update_dispatch {};
176
+
177
+ template<>
178
+ struct atomic_update_dispatch<false> {
179
+ template<typename TyAtomic, typename TyVal, typename TyOp>
180
+ _CG_STATIC_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
181
+ return atomic_cas_fallback(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
182
+ }
183
+ };
184
+
185
+ template<>
186
+ struct atomic_update_dispatch<true> {
187
+ template<typename TyAtomic, typename TyVal, typename TyOp>
188
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val, TyOp&& op) {
189
+ using dispatch = op_picker<details::remove_qual<TyOp>>;
190
+
191
+ return dispatch::atomic_update(atomic, val);
192
+ }
193
+ };
194
+
195
+ template<typename TyAtomic, typename TyVal, typename TyOp>
196
+ _CG_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
197
+ using dispatch = atomic_update_dispatch<_atomic_op_supported<details::remove_qual<TyOp>>::value>;
198
+
199
+ return dispatch::atomic_update(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
200
+ }
201
+
202
+ template<typename TyAtomic, typename TyVal>
203
+ _CG_QUALIFIER void atomic_store(TyAtomic& atomic, TyVal&& val) {
204
+ atomic.store(val, cuda::std::memory_order_relaxed);
205
+ }
206
+ }
207
+ #endif
208
+
209
+ _CG_END_NAMESPACE
210
+
211
+ #endif
212
+ #endif //_CG_FUNCTIONAL_H
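atomic_cas_fallback above implements the generic path for operators that have no matching fetch_* instruction: load the current value, apply the op, and retry a relaxed compare-exchange until it sticks. A hedged standalone sketch of the same loop using libcu++ (the atomic parameter type and function name are illustrative, not from the header):

#include <cuda/atomic>

// Sketch of a CAS-based atomic update for an arbitrary binary op.
template <typename T, typename Op>
__device__ T atomic_apply(cuda::atomic<T, cuda::thread_scope_device>& a, T val, Op op) {
    T old = a.load(cuda::std::memory_order_relaxed);
    // compare_exchange_weak reloads `old` on failure, so the loop re-applies
    // op to the freshest value until no other thread intervenes.
    while (!a.compare_exchange_weak(old, op(old, val), cuda::std::memory_order_relaxed)) { }
    return old;  // previous value, matching fetch_* semantics
}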
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h ADDED
@@ -0,0 +1,634 @@
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_HELPERS_H_
50
+ # define _COOPERATIVE_GROUPS_HELPERS_H_
51
+
52
+ #include "info.h"
53
+ #include "sync.h"
54
+
55
+ _CG_BEGIN_NAMESPACE
56
+
57
+ namespace details {
58
+ #ifdef _CG_CPP11_FEATURES
59
+ template <typename Ty> struct _is_float_or_half : public _CG_STL_NAMESPACE::is_floating_point<Ty> {};
60
+ # ifdef _CG_HAS_FP16_COLLECTIVE
61
+ template <> struct _is_float_or_half<__half> : public _CG_STL_NAMESPACE::true_type {};
62
+ template <> struct _is_float_or_half<__half2> : public _CG_STL_NAMESPACE::true_type {};
63
+ # endif
64
+ template <typename Ty>
65
+ using is_float_or_half = _is_float_or_half<typename _CG_STL_NAMESPACE::remove_cv<Ty>::type>;
66
+
67
+ // Non-STL utility templates
68
+ template <typename Ty>
69
+ using remove_qual = typename _CG_STL_NAMESPACE::remove_cv<typename _CG_STL_NAMESPACE::remove_reference<Ty>::type>::type;
70
+
71
+ template <typename TyLhs, typename TyRhs>
72
+ using is_op_type_same = _CG_STL_NAMESPACE::is_same<remove_qual<TyLhs>, remove_qual<TyRhs>
73
+ >;
74
+ #endif
75
+
76
+ template <typename TyTrunc>
77
+ _CG_STATIC_QUALIFIER TyTrunc vec3_to_linear(dim3 index, dim3 nIndex) {
78
+ return ((TyTrunc)index.z * nIndex.y * nIndex.x) +
79
+ ((TyTrunc)index.y * nIndex.x) +
80
+ (TyTrunc)index.x;
81
+ }
82
+
83
+ namespace cta {
84
+
85
+ _CG_STATIC_QUALIFIER void sync()
86
+ {
87
+ __barrier_sync(0);
88
+ }
89
+
90
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
91
+ {
92
+ return static_cast<unsigned int>(blockDim.x * blockDim.y * blockDim.z);
93
+ }
94
+
95
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
96
+ {
97
+ return vec3_to_linear<unsigned int>(threadIdx, blockDim);
98
+ }
99
+
100
+ _CG_STATIC_QUALIFIER dim3 group_index()
101
+ {
102
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
103
+ }
104
+
105
+ _CG_STATIC_QUALIFIER dim3 thread_index()
106
+ {
107
+ return dim3(threadIdx.x, threadIdx.y, threadIdx.z);
108
+ }
109
+
110
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
111
+ {
112
+ return dim3(blockDim.x, blockDim.y, blockDim.z);
113
+ }
114
+
115
+ // Legacy aliases
116
+ _CG_STATIC_QUALIFIER unsigned int size()
117
+ {
118
+ return num_threads();
119
+ }
120
+
121
+ _CG_STATIC_QUALIFIER dim3 block_dim()
122
+ {
123
+ return dim_threads();
124
+ }
125
+
126
+ };
127
+
128
+ class _coalesced_group_data_access {
129
+ public:
130
+ // Retrieve mask of coalesced groups and tiles
131
+ template <typename TyGroup>
132
+ _CG_STATIC_QUALIFIER unsigned int get_mask(const TyGroup &group) {
133
+ return group.get_mask();
134
+ }
135
+
136
+ template <typename TyGroup>
137
+ _CG_STATIC_QUALIFIER TyGroup construct_from_mask(unsigned int mask) {
138
+ return TyGroup(mask);
139
+ }
140
+
141
+ template <typename TyGroup>
142
+ _CG_STATIC_QUALIFIER void modify_meta_group(TyGroup &group, unsigned int mgRank, unsigned int mgSize) {
143
+ group._data.coalesced.metaGroupRank = mgRank;
144
+ group._data.coalesced.metaGroupSize = mgSize;
145
+ }
146
+ };
147
+
148
+ namespace tile {
149
+ template <unsigned int TileCount, unsigned int TileMask, unsigned int LaneMask, unsigned int ShiftCount>
150
+ struct _tile_helpers{
151
+ _CG_STATIC_CONST_DECL unsigned int tileCount = TileCount;
152
+ _CG_STATIC_CONST_DECL unsigned int tileMask = TileMask;
153
+ _CG_STATIC_CONST_DECL unsigned int laneMask = LaneMask;
154
+ _CG_STATIC_CONST_DECL unsigned int shiftCount = ShiftCount;
155
+ };
156
+
157
+ template <unsigned int> struct tile_helpers;
158
+ template <> struct tile_helpers<32> : public _tile_helpers<1, 0xFFFFFFFF, 0x1F, 5> {};
159
+ template <> struct tile_helpers<16> : public _tile_helpers<2, 0x0000FFFF, 0x0F, 4> {};
160
+ template <> struct tile_helpers<8> : public _tile_helpers<4, 0x000000FF, 0x07, 3> {};
161
+ template <> struct tile_helpers<4> : public _tile_helpers<8, 0x0000000F, 0x03, 2> {};
162
+ template <> struct tile_helpers<2> : public _tile_helpers<16, 0x00000003, 0x01, 1> {};
163
+ template <> struct tile_helpers<1> : public _tile_helpers<32, 0x00000001, 0x00, 0> {};
164
+
165
+ #ifdef _CG_CPP11_FEATURES
166
+ namespace shfl {
167
+ /***********************************************************************************
168
+ * Recursively Sliced Shuffle
169
+ * Purpose:
170
+ * Slices an input type a number of times into integral types so that shuffles
171
+ * are well defined
172
+ * Expectations:
173
+ * This object *should not* be used from a reinterpret_cast pointer unless
174
+ * some alignment guarantees can be met. Use a memcpy to guarantee that loads
175
+ * from the integral types stored within are aligned and correct.
176
+ **********************************************************************************/
177
+ template <unsigned int count, bool intSized = (count <= sizeof(int))>
178
+ struct recursive_sliced_shuffle_helper;
179
+
180
+ template <unsigned int count>
181
+ struct recursive_sliced_shuffle_helper<count, true> {
182
+ int val;
183
+
184
+ template <typename TyFn>
185
+ _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
186
+ val = shfl(val);
187
+ }
188
+ };
189
+
190
+ template <unsigned int count>
191
+ struct recursive_sliced_shuffle_helper<count, false> {
192
+ int val;
193
+ recursive_sliced_shuffle_helper<count - sizeof(int)> next;
194
+
195
+ template <typename TyFn>
196
+ _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
197
+ val = shfl(val);
198
+ next.invoke_shuffle(shfl);
199
+ }
200
+ };
201
+ }
202
+
203
+ struct _memory_shuffle {
204
+ template <typename TyElem, typename TyShflFn>
205
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
206
+ static_assert(sizeof(TyElem) <= 32, "Cooperative groups collectives are limited to types smaller than 32B");
207
+ return TyElem{};
208
+ }
209
+
210
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
211
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
212
+ auto shfl = [=](int val) -> int {
213
+ return 0;
214
+ };
215
+
216
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
217
+ }
218
+
219
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
220
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
221
+ auto shfl = [=](int val) -> int {
222
+ return 0;
223
+ };
224
+
225
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
226
+ }
227
+
228
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
229
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
230
+ auto shfl = [=](int val) -> int {
231
+ return 0;
232
+ };
233
+
234
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
235
+ }
236
+
237
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
238
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
239
+ auto shfl = [=](int val) -> int {
240
+ return 0;
241
+ };
242
+
243
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
244
+ }
245
+ };
246
+
247
+ /***********************************************************************************
248
+ * Intrinsic Device Function Shuffle
249
+ * Purpose:
250
+ * Uses a shuffle helper that has characteristics best suited for moving
251
+ * elements between threads
252
+ * Expectations:
253
+ * Object given will be forced into an l-value type so that it can be used
254
+ * with a helper structure that reinterprets the data into intrinsic compatible
255
+ * types
256
+ * Notes:
257
+ * !! TyRet is required so that objects are returned by value and not as
258
+ * dangling references depending on the value category of the passed object
259
+ **********************************************************************************/
260
+ struct _intrinsic_compat_shuffle {
261
+ template <unsigned int count>
262
+ using shfl_helper = shfl::recursive_sliced_shuffle_helper<count>;
263
+
264
+ template <typename TyElem, typename TyShflFn>
265
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
266
+ static_assert(__is_trivially_copyable(TyElem), "Type is not compatible with device shuffle");
267
+ shfl_helper<sizeof(TyElem)> helper;
268
+ memcpy(&helper, &elem, sizeof(TyElem));
269
+ helper.invoke_shuffle(fn);
270
+ memcpy(&elem, &helper, sizeof(TyElem));
271
+ return elem;
272
+ }
273
+
274
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
275
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
276
+ auto shfl = [=](int val) -> int {
277
+ return __shfl_sync(gMask, val, srcRank, threads);
278
+ };
279
+
280
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
281
+ }
282
+
283
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
284
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
285
+ auto shfl = [=](int val) -> int {
286
+ return __shfl_down_sync(gMask, val, delta, threads);
287
+ };
288
+
289
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
290
+ }
291
+
292
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
293
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
294
+ auto shfl = [=](int val) -> int {
295
+ return __shfl_up_sync(gMask, val, delta, threads);
296
+ };
297
+
298
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
299
+ }
300
+
301
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
302
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
303
+ auto shfl = [=](int val) -> int {
304
+ return __shfl_xor_sync(gMask, val, lMask, threads);
305
+ };
306
+
307
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
308
+ }
309
+ };
310
+
311
+ struct _native_shuffle {
312
+ template <typename TyElem>
313
+ _CG_STATIC_QUALIFIER TyElem shfl(
314
+ TyElem elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
315
+ return static_cast<TyElem>(__shfl_sync(gMask, elem, srcRank, threads));
316
+ }
317
+
318
+ template <typename TyElem>
319
+ _CG_STATIC_QUALIFIER TyElem shfl_down(
320
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
321
+ return static_cast<TyElem>(__shfl_down_sync(gMask, elem, delta, threads));
322
+ }
323
+
324
+ template <typename TyElem>
325
+ _CG_STATIC_QUALIFIER TyElem shfl_up(
326
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
327
+ return static_cast<TyElem>(__shfl_up_sync(gMask, elem, delta, threads));
328
+ }
329
+
330
+ template <typename TyElem>
331
+ _CG_STATIC_QUALIFIER TyElem shfl_xor(
332
+ TyElem elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
333
+ return static_cast<TyElem>(__shfl_xor_sync(gMask, elem, lMask, threads));
334
+ }
335
+ };
336
+
337
+ // Almost all arithmetic types are supported by native shuffle
338
+ // Vector types are the exception
339
+ template <typename TyElem>
340
+ using use_native_shuffle = _CG_STL_NAMESPACE::integral_constant<
341
+ bool,
342
+ _CG_STL_NAMESPACE::is_integral<
343
+ remove_qual<TyElem>>::value ||
344
+ details::is_float_or_half<
345
+ remove_qual<TyElem>>::value
346
+ >;
347
+
348
+ constexpr unsigned long long _MemoryShuffleCutoff = 32;
349
+
350
+ template <typename TyElem,
351
+ bool IsNative = use_native_shuffle<TyElem>::value,
352
+ bool InMem = (sizeof(TyElem) > _MemoryShuffleCutoff)>
353
+ struct shuffle_dispatch;
354
+
355
+ template <typename TyElem>
356
+ struct shuffle_dispatch<TyElem, true, false> : public _native_shuffle {};
357
+
358
+ template <typename TyElem>
359
+ struct shuffle_dispatch<TyElem, false, false> : public _intrinsic_compat_shuffle {};
360
+
361
+ template <typename TyElem>
362
+ struct shuffle_dispatch<TyElem, false, true> : public _memory_shuffle {};
363
+
364
+ #endif //_CG_CPP11_FEATURES
365
+ };
366
+
367
+ namespace multi_grid {
368
+ struct multi_grid_functions;
369
+ };
370
+
371
+ namespace grid {
372
+ _CG_STATIC_QUALIFIER void sync(unsigned int *bar) {
373
+ unsigned int expected = gridDim.x * gridDim.y * gridDim.z;
374
+
375
+ details::sync_grids(expected, bar);
376
+ }
377
+
378
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks()
379
+ {
380
+ // grid.y * grid.z -> [max(65535) * max(65535)] fits within 4b, promote after multiplication
381
+ // grid.x * (grid.y * grid.z) -> [max(2^31-1) * max(65535 * 65535)] exceeds 4b, promote before multiplication
382
+ return (unsigned long long)gridDim.x * (gridDim.y * gridDim.z);
383
+ }
384
+
385
+ _CG_STATIC_QUALIFIER unsigned long long num_threads()
386
+ {
387
+ return num_blocks() * cta::num_threads();
388
+ }
389
+
390
+ _CG_STATIC_QUALIFIER unsigned long long block_rank()
391
+ {
392
+ return vec3_to_linear<unsigned long long>(blockIdx, gridDim);
393
+ }
394
+
395
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank()
396
+ {
397
+ return block_rank() * cta::num_threads() + cta::thread_rank();
398
+ }
399
+
400
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
401
+ {
402
+ return dim3(gridDim.x, gridDim.y, gridDim.z);
403
+ }
404
+
405
+ _CG_STATIC_QUALIFIER dim3 block_index()
406
+ {
407
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
408
+ }
409
+
410
+ #if defined(_CG_HAS_CLUSTER_GROUP)
411
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
412
+ return __clusterGridDimInClusters();
413
+ }
414
+
415
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
416
+ const dim3 dimClusters = dim_clusters();
417
+ return dimClusters.x * dimClusters.y * dimClusters.z;
418
+ }
419
+
420
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
421
+ return __clusterIdx();
422
+ }
423
+
424
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
425
+ return vec3_to_linear<unsigned long long>(cluster_index(), dim_clusters());
426
+ }
427
+ #endif
428
+
429
+ // Legacy aliases
430
+ _CG_STATIC_QUALIFIER unsigned long long size()
431
+ {
432
+ return num_threads();
433
+ }
434
+
435
+ _CG_STATIC_QUALIFIER dim3 grid_dim()
436
+ {
437
+ return dim_blocks();
438
+ }
439
+ };
440
+
441
+
442
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
443
+
444
+ namespace multi_grid {
445
+ _CG_STATIC_QUALIFIER unsigned long long get_intrinsic_handle()
446
+ {
447
+ return (cudaCGGetIntrinsicHandle(cudaCGScopeMultiGrid));
448
+ }
449
+
450
+ _CG_STATIC_QUALIFIER void sync(const unsigned long long handle)
451
+ {
452
+ cudaError_t err = cudaCGSynchronize(handle, 0);
453
+ }
454
+
455
+ _CG_STATIC_QUALIFIER unsigned int size(const unsigned long long handle)
456
+ {
457
+ unsigned int numThreads = 0;
458
+ cudaCGGetSize(&numThreads, NULL, handle);
459
+ return numThreads;
460
+ }
461
+
462
+ _CG_STATIC_QUALIFIER unsigned int thread_rank(const unsigned long long handle)
463
+ {
464
+ unsigned int threadRank = 0;
465
+ cudaCGGetRank(&threadRank, NULL, handle);
466
+ return threadRank;
467
+ }
468
+
469
+ _CG_STATIC_QUALIFIER unsigned int grid_rank(const unsigned long long handle)
470
+ {
471
+ unsigned int gridRank = 0;
472
+ cudaCGGetRank(NULL, &gridRank, handle);
473
+ return gridRank;
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER unsigned int num_grids(const unsigned long long handle)
477
+ {
478
+ unsigned int numGrids = 0;
479
+ cudaCGGetSize(NULL, &numGrids, handle);
480
+ return numGrids;
481
+ }
482
+
483
+ # ifdef _CG_CPP11_FEATURES
484
+ struct multi_grid_functions {
485
+ decltype(multi_grid::get_intrinsic_handle) *get_intrinsic_handle;
486
+ decltype(multi_grid::sync) *sync;
487
+ decltype(multi_grid::size) *size;
488
+ decltype(multi_grid::thread_rank) *thread_rank;
489
+ decltype(multi_grid::grid_rank) *grid_rank;
490
+ decltype(multi_grid::num_grids) *num_grids;
491
+ };
492
+
493
+ template <typename = void>
494
+ _CG_STATIC_QUALIFIER const multi_grid_functions* load_grid_intrinsics() {
495
+ __constant__ static const multi_grid_functions mgf {
496
+ &multi_grid::get_intrinsic_handle,
497
+ &multi_grid::sync,
498
+ &multi_grid::size,
499
+ &multi_grid::thread_rank,
500
+ &multi_grid::grid_rank,
501
+ &multi_grid::num_grids
502
+ };
503
+
504
+ return &mgf;
505
+ }
506
+ # endif
507
+ };
508
+ #endif
509
+
510
+ #if defined(_CG_HAS_CLUSTER_GROUP)
511
+ namespace cluster {
512
+
513
+ _CG_STATIC_QUALIFIER bool isReal()
514
+ {
515
+ return __clusterDimIsSpecified();
516
+ }
517
+
518
+ _CG_STATIC_QUALIFIER void barrier_arrive()
519
+ {
520
+ __cluster_barrier_arrive();
521
+ }
522
+
523
+ _CG_STATIC_QUALIFIER void barrier_wait()
524
+ {
525
+ __cluster_barrier_wait();
526
+ }
527
+
528
+ _CG_STATIC_QUALIFIER void sync()
529
+ {
530
+ barrier_arrive();
531
+ barrier_wait();
532
+ }
533
+
534
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
535
+ {
536
+ return __cluster_query_shared_rank(addr);
537
+ }
538
+
539
+ template <typename T>
540
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
541
+ {
542
+ return static_cast<T*>(__cluster_map_shared_rank(addr, rank));
543
+ }
544
+
545
+ _CG_STATIC_QUALIFIER dim3 block_index()
546
+ {
547
+ return __clusterRelativeBlockIdx();
548
+ }
549
+
550
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
551
+ {
552
+ return __clusterRelativeBlockRank();
553
+ }
554
+
555
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
556
+ {
557
+ return block_rank() * cta::num_threads() + cta::thread_rank();
558
+ }
559
+
560
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
561
+ {
562
+ return __clusterDim();
563
+ }
564
+
565
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
566
+ {
567
+ return __clusterSizeInBlocks();
568
+ }
569
+
570
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
571
+ {
572
+ const dim3 dimBlocks = dim_blocks();
573
+ const unsigned int x = dimBlocks.x * blockDim.x;
574
+ const unsigned int y = dimBlocks.y * blockDim.y;
575
+ const unsigned int z = dimBlocks.z * blockDim.z;
576
+ return dim3(x, y, z);
577
+ }
578
+
579
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
580
+ {
581
+ return num_blocks() * cta::num_threads();
582
+ }
583
+
584
+ };
585
+ #endif
586
+
587
+ _CG_STATIC_QUALIFIER unsigned int laneid()
588
+ {
589
+ unsigned int laneid;
590
+ asm ("mov.u32 %0, %%laneid;" : "=r"(laneid));
591
+ return laneid;
592
+ }
593
+
594
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_eq()
595
+ {
596
+ unsigned int lanemask32_eq;
597
+ asm ("mov.u32 %0, %%lanemask_eq;" : "=r"(lanemask32_eq));
598
+ return (lanemask32_eq);
599
+ }
600
+
601
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_lt()
602
+ {
603
+ unsigned int lanemask32_lt;
604
+ asm ("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask32_lt));
605
+ return (lanemask32_lt);
606
+ }
607
+
608
+ _CG_STATIC_QUALIFIER void abort()
609
+ {
610
+ _CG_ABORT();
611
+ }
612
+
613
+ template <typename Ty>
614
+ _CG_QUALIFIER void assert_if_not_arithmetic() {
615
+ #ifdef _CG_CPP11_FEATURES
616
+ static_assert(
617
+ _CG_STL_NAMESPACE::is_integral<Ty>::value ||
618
+ details::is_float_or_half<Ty>::value,
619
+ "Error: Ty is neither integer nor float"
620
+ );
621
+ #endif //_CG_CPP11_FEATURES
622
+ }
623
+
624
+ #ifdef _CG_CPP11_FEATURES
625
+ _CG_STATIC_QUALIFIER constexpr unsigned int log2(unsigned int x) {
626
+ return x == 1 ? 0 : 1 + log2(x / 2);
627
+ }
628
+ #endif //_CG_CPP11_FEATURES
629
+
630
+ }; // !Namespace internal
631
+
632
+ _CG_END_NAMESPACE
633
+
634
+ #endif /* !_COOPERATIVE_GROUPS_HELPERS_H_ */
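The shuffle_dispatch machinery above picks native __shfl_sync for arithmetic types, the slice-into-ints helper for other trivially copyable types up to 32 bytes, and a memory fallback beyond that. A sketch of the slicing idea for a small hypothetical struct, assuming all 32 lanes participate (names are illustrative):

#include <cstring>

struct Payload { float a; int b; };   // hypothetical trivially copyable type

// Illustrative sketch of the "slice into ints and shuffle each word" idea
// behind _intrinsic_compat_shuffle above.
__device__ Payload shfl_payload(Payload p, unsigned int srcLane) {
    int words[(sizeof(Payload) + sizeof(int) - 1) / sizeof(int)];
    memcpy(words, &p, sizeof(Payload));                    // avoids misaligned reinterpret_cast loads
    for (unsigned int i = 0; i < sizeof(words) / sizeof(int); ++i) {
        words[i] = __shfl_sync(0xFFFFFFFFu, words[i], srcLane, 32);
    }
    memcpy(&p, words, sizeof(Payload));
    return p;
}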
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h ADDED
@@ -0,0 +1,338 @@
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+
50
+
51
+ #ifndef _CG_INFO_H_
52
+ #define _CG_INFO_H_
53
+ /*
54
+ ** Define: _CG_VERSION
55
+ */
56
+ #define _CG_VERSION 1000
57
+
58
+ /*
59
+ ** Define: _CG_ABI_VERSION
60
+ */
61
+ #ifndef _CG_ABI_VERSION
62
+ # define _CG_ABI_VERSION 1
63
+ #endif
64
+
65
+ /*
66
+ ** Define: _CG_ABI_EXPERIMENTAL
67
+ ** Desc: If enabled, sets all features enabled (ABI-breaking or experimental)
68
+ */
69
+ #if defined(_CG_ABI_EXPERIMENTAL)
70
+ #endif
71
+
72
+ #define _CG_CONCAT_INNER(x, y) x ## y
73
+ #define _CG_CONCAT_OUTER(x, y) _CG_CONCAT_INNER(x, y)
74
+ #define _CG_NAMESPACE _CG_CONCAT_OUTER(__v, _CG_ABI_VERSION)
75
+
76
+ #define _CG_BEGIN_NAMESPACE \
77
+ namespace cooperative_groups { namespace _CG_NAMESPACE {
78
+ #define _CG_END_NAMESPACE \
79
+ }; using namespace _CG_NAMESPACE; };
80
+
81
+ #if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1900))
82
+ # define _CG_CPP11_FEATURES
83
+ #endif
84
+
85
+ #if !defined(_CG_QUALIFIER)
86
+ # define _CG_QUALIFIER __forceinline__ __device__
87
+ #endif
88
+ #if !defined(_CG_STATIC_QUALIFIER)
89
+ # define _CG_STATIC_QUALIFIER static __forceinline__ __device__
90
+ #endif
91
+ #if !defined(_CG_CONSTEXPR_QUALIFIER)
92
+ # if defined(_CG_CPP11_FEATURES)
93
+ # define _CG_CONSTEXPR_QUALIFIER constexpr __forceinline__ __device__
94
+ # else
95
+ # define _CG_CONSTEXPR_QUALIFIER _CG_QUALIFIER
96
+ # endif
97
+ #endif
98
+ #if !defined(_CG_STATIC_CONSTEXPR_QUALIFIER)
99
+ # if defined(_CG_CPP11_FEATURES)
100
+ # define _CG_STATIC_CONSTEXPR_QUALIFIER static constexpr __forceinline__ __device__
101
+ # else
102
+ # define _CG_STATIC_CONSTEXPR_QUALIFIER _CG_STATIC_QUALIFIER
103
+ # endif
104
+ #endif
105
+
106
+ #if defined(_MSC_VER)
107
+ # define _CG_DEPRECATED __declspec(deprecated)
108
+ #else
109
+ # define _CG_DEPRECATED __attribute__((deprecated))
110
+ #endif
111
+
112
+ #if (__CUDA_ARCH__ >= 600) || !defined(__CUDA_ARCH__)
113
+ # define _CG_HAS_GRID_GROUP
114
+ #endif
115
+ #if (__CUDA_ARCH__ >= 600) || !defined(__CUDA_ARCH__)
116
+ # define _CG_HAS_MULTI_GRID_GROUP
117
+ #endif
118
+ #if (__CUDA_ARCH__ >= 700) || !defined(__CUDA_ARCH__)
119
+ # define _CG_HAS_MATCH_COLLECTIVE
120
+ #endif
121
+
122
+ #if (__CUDA_ARCH__ >= 800) || !defined(__CUDA_ARCH__) && (defined(__NVCC__) || defined(__CUDACC_RTC__))
123
+ # define _CG_HAS_OP_REDUX
124
+ #endif
125
+
126
+ #if ((__CUDA_ARCH__ >= 800) || !defined(__CUDA_ARCH__)) && !defined(_CG_USER_PROVIDED_SHARED_MEMORY)
127
+ # define _CG_HAS_RESERVED_SHARED
128
+ #endif
129
+
130
+ #if ((__CUDA_ARCH__ >= 900) || !defined(__CUDA_ARCH__)) && \
131
+ (defined(__NVCC__) || defined(__CUDACC_RTC__) || defined(_CG_CLUSTER_INTRINSICS_AVAILABLE)) && \
132
+ defined(_CG_CPP11_FEATURES)
133
+ # define _CG_HAS_CLUSTER_GROUP
134
+ #endif
135
+
136
+ #if (__CUDA_ARCH__ >= 900) || !defined(__CUDA_ARCH__)
137
+ # define _CG_HAS_INSTR_ELECT
138
+ #endif
139
+
140
+ // Has __half and __half2
141
+ // Only usable if you include the cuda_fp16.h extension, and
142
+ // _before_ including cooperative_groups.h
143
+ #ifdef __CUDA_FP16_TYPES_EXIST__
144
+ # define _CG_HAS_FP16_COLLECTIVE
145
+ #endif
146
+
147
+ // Include libcu++ where supported.
148
+ #if defined(_CG_CPP11_FEATURES) && !defined(__QNX__) && !defined(__ibmxl__) && \
149
+ (defined(__NVCC__) || defined(__CUDACC_RTC__)) && \
150
+ (defined(__x86_64__) || defined(__aarch64__) || defined(__ppc64__)|| defined(_M_X64) || defined(_M_ARM64)) && \
151
+ (defined(_MSC_VER) || defined(__GNUC__) || defined(__clang__))
152
+ # define _CG_USE_CUDA_STL
153
+ #else
154
+ # define _CG_USE_OWN_TRAITS
155
+ #endif
156
+
157
+ #if defined(_CG_USE_CUDA_STL) && (!defined(__CUDA_ARCH__) || \
158
+ ((!defined(_MSC_VER) && __CUDA_ARCH__ >= 600) || (defined(_MSC_VER) && __CUDA_ARCH__ >= 700)))
159
+ # define _CG_HAS_STL_ATOMICS
160
+ #endif
161
+
162
+ #ifdef _CG_CPP11_FEATURES
163
+ // Use cuda::std:: for type_traits
164
+ # if defined(_CG_USE_CUDA_STL)
165
+ # define _CG_STL_NAMESPACE cuda::std
166
+ # include <cuda/std/type_traits>
167
+ // Use CG's implementation of type traits
168
+ # else
169
+ # define _CG_STL_NAMESPACE cooperative_groups::details::templates
170
+ # endif
171
+ #endif
172
+
173
+ #ifdef _CG_CPP11_FEATURES
174
+ # define _CG_STATIC_CONST_DECL static constexpr
175
+ # define _CG_CONST_DECL constexpr
176
+ #else
177
+ # define _CG_STATIC_CONST_DECL static const
178
+ # define _CG_CONST_DECL const
179
+ #endif
180
+
181
+ #if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__)
182
+ # define _CG_ASM_PTR_CONSTRAINT "r"
183
+ #else
184
+ # define _CG_ASM_PTR_CONSTRAINT "l"
185
+ #endif
186
+
187
+ /*
188
+ ** Define: CG_DEBUG
189
+ ** What: Enables various runtime safety checks
190
+ */
191
+ #if defined(__CUDACC_DEBUG__) && defined(CG_DEBUG) && !defined(NDEBUG)
192
+ # define _CG_DEBUG
193
+ #endif
194
+
195
+ #if defined(_CG_DEBUG)
196
+ # include <assert.h>
197
+ # define _CG_ASSERT(x) assert((x));
198
+ # define _CG_ABORT() assert(0);
199
+ #else
200
+ # define _CG_ASSERT(x)
201
+ # define _CG_ABORT() __trap();
202
+ #endif
203
+
204
+ _CG_BEGIN_NAMESPACE
205
+
206
+ namespace details {
207
+ _CG_STATIC_CONST_DECL unsigned int default_max_block_size = 1024;
208
+
209
+ #if defined(_CG_CPP11_FEATURES) && !defined(_CG_USE_CUDA_STL)
210
+ namespace templates {
211
+
212
+ /**
213
+ * Integral constants
214
+ **/
215
+ template <typename Ty, Ty Val>
216
+ struct integral_constant {
217
+ static constexpr Ty value = Val;
218
+ typedef Ty type;
219
+
220
+ _CG_QUALIFIER constexpr operator type() const noexcept { return value; }
221
+ _CG_QUALIFIER constexpr type operator()() const noexcept { return value; }
222
+ };
223
+
224
+ typedef integral_constant<bool, true> true_type;
225
+ typedef integral_constant<bool, false> false_type;
226
+
227
+ /**
228
+ * CV Qualifiers
229
+ **/
230
+ template <class Ty> struct is_lvalue_reference : public details::templates::false_type {};
231
+ template <class Ty> struct is_lvalue_reference<Ty&> : public details::templates::true_type {};
232
+
233
+ template <class Ty> struct remove_reference {typedef Ty type;};
234
+ template <class Ty> struct remove_reference<Ty&> {typedef Ty type;};
235
+ template <class Ty> struct remove_reference<Ty&&> {typedef Ty type;};
236
+
237
+ template <class Ty>
238
+ using remove_reference_t = typename details::templates::remove_reference<Ty>::type;
239
+
240
+ template <class Ty> struct remove_const {typedef Ty type;};
241
+ template <class Ty> struct remove_const<const Ty> {typedef Ty type;};
242
+
243
+ template <class Ty> struct remove_volatile {typedef Ty type;};
244
+ template <class Ty> struct remove_volatile<volatile Ty> {typedef Ty type;};
245
+
246
+ template <class Ty> struct remove_cv {typedef typename details::templates::remove_volatile<typename details::templates::remove_const<Ty>::type>::type type;};
247
+
248
+ template <class Ty>
249
+ using remove_cv_t = typename details::templates::remove_cv<Ty>::type;
250
+
251
+ template <class Ty>
252
+ _CG_QUALIFIER Ty&& forward(remove_reference_t<Ty> &t) noexcept {
253
+ return static_cast<Ty&&>(t);
254
+ }
255
+
256
+ template <class Ty>
257
+ _CG_QUALIFIER Ty&& forward(remove_reference_t<Ty> &&t) noexcept {
258
+ static_assert(!details::templates::is_lvalue_reference<Ty>::value, "Forwarding an rvalue as an lvalue is not allowed.");
259
+ return static_cast<Ty&&>(t);
260
+ }
261
+
262
+ /**
263
+ * is_integral
264
+ **/
265
+ template <class Ty> struct _is_integral : public details::templates::false_type {};
266
+ template <> struct _is_integral<bool> : public details::templates::true_type {};
267
+ template <> struct _is_integral<char> : public details::templates::true_type {};
268
+ template <> struct _is_integral<unsigned char> : public details::templates::true_type {};
269
+ template <> struct _is_integral<short> : public details::templates::true_type {};
270
+ template <> struct _is_integral<unsigned short> : public details::templates::true_type {};
271
+ template <> struct _is_integral<int> : public details::templates::true_type {};
272
+ template <> struct _is_integral<unsigned int> : public details::templates::true_type {};
273
+ template <> struct _is_integral<long> : public details::templates::true_type {};
274
+ template <> struct _is_integral<long long> : public details::templates::true_type {};
275
+ template <> struct _is_integral<unsigned long> : public details::templates::true_type {};
276
+ template <> struct _is_integral<unsigned long long> : public details::templates::true_type {};
277
+ //Vector type support?
278
+
279
+ template <typename Ty>
280
+ struct is_integral : public details::templates::_is_integral<typename details::templates::remove_cv<Ty>::type> {};
281
+
282
+ /**
283
+ * is_floating_point
284
+ **/
285
+ template <class Ty> struct _is_floating_point : public details::templates::false_type {};
286
+ template <> struct _is_floating_point<float> : public details::templates::true_type {};
287
+ template <> struct _is_floating_point<double> : public details::templates::true_type {};
288
+ template <> struct _is_floating_point<long double> : public details::templates::true_type {};
289
+ # ifdef __CUDA_FP16_TYPES_EXIST__
290
+ template <> struct _is_floating_point<__half> : public details::templates::true_type {};
291
+ template <> struct _is_floating_point<__half2> : public details::templates::true_type {};
292
+ # endif
293
+ //Vector type support?
294
+
295
+ template <typename Ty>
296
+ struct is_floating_point : public details::templates::_is_floating_point<typename details::templates::remove_cv<Ty>::type> {};
297
+
298
+ template <class T>
299
+ struct is_arithmetic : details::templates::integral_constant<
300
+ bool,
301
+ details::templates::is_integral<T>::value ||
302
+ details::templates::is_floating_point<T>::value> {};
303
+
304
+ template <typename Ty, bool = details::templates::is_arithmetic<Ty>::value>
305
+ struct _is_unsigned : details::templates::integral_constant<bool, Ty(0) < Ty(-1)> {};
306
+
307
+ template <typename Ty>
308
+ struct _is_unsigned<Ty,false> : details::templates::false_type {};
309
+
310
+ template <typename Ty>
311
+ struct is_unsigned : _is_unsigned<typename details::templates::remove_cv<Ty>::type> {};
312
+
313
+ /**
314
+ * programmatic type traits
315
+ **/
316
+ template<bool B, class Ty = void>
317
+ struct enable_if {};
318
+
319
+ template<class Ty>
320
+ struct enable_if<true, Ty> { typedef Ty type; };
321
+
322
+ template<bool Cond, typename Ty = void>
323
+ using enable_if_t = typename details::templates::enable_if<Cond, Ty>::type;
324
+
325
+ template<class Ty1, class Ty2>
326
+ struct is_same : details::templates::false_type {};
327
+
328
+ template<class Ty>
329
+ struct is_same<Ty, Ty> : details::templates::true_type {};
330
+
331
+ } // templates
332
+ #endif // _CG_CPP11_FEATURES
333
+
334
+ } // details
335
+ _CG_END_NAMESPACE
336
+
337
+
338
+ #endif // _CG_INFO_H_
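For reference, with the default _CG_ABI_VERSION of 1 the namespace macros above expand roughly as follows (an illustration of the versioned-namespace pattern, not additional header code):

// _CG_NAMESPACE becomes __v1 via the concat macros, so:
namespace cooperative_groups { namespace __v1 {
    // ... declarations placed between _CG_BEGIN_NAMESPACE and _CG_END_NAMESPACE ...
}; using namespace __v1; };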
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h ADDED
@@ -0,0 +1,189 @@
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_INVOKE_H
51
+ #define _CG_INVOKE_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename Group>
63
+ struct _elect_group_supported : _CG_STL_NAMESPACE::false_type {};
64
+ #ifdef _CG_HAS_INSTR_ELECT
65
+ template<>
66
+ struct _elect_group_supported<coalesced_group> : _CG_STL_NAMESPACE::true_type {};
67
+ template<unsigned int Size, typename Parent>
68
+ struct _elect_group_supported<thread_block_tile<Size, Parent>> :
69
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size <= 32)> {};
70
+ #endif
71
+
72
+ template <typename Group>
73
+ struct elect_group_supported : public _elect_group_supported<details::remove_qual<Group>> {};
74
+
75
+ template<typename Group>
76
+ _CG_STATIC_QUALIFIER bool elect_one(const Group& group, unsigned int mask, unsigned int& leader_lane) {
77
+ int is_leader = 0;
78
+ #ifdef _CG_HAS_INSTR_ELECT
79
+ asm("{\n\t"
80
+ " .reg .pred p;\n\t"
81
+ " elect.sync %0|p, %2;\n\t"
82
+ " @p mov.s32 %1, 1;\n\t"
83
+ "}"
84
+ : "+r"(leader_lane), "+r"(is_leader) : "r" (mask));
85
+ #endif
86
+ return is_leader;
87
+ }
88
+
89
+ template<bool UseElect>
90
+ struct invoke_one_impl {};
91
+
92
+ template<>
93
+ struct invoke_one_impl<true> {
94
+ template<typename Group, typename Fn, typename... Args>
95
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
96
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
97
+ unsigned int leader_lane = 0;
98
+
99
+ if (elect_one(group, mask, leader_lane)) {
100
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
101
+ }
102
+ }
103
+
104
+ template<typename Group, typename Fn, typename... Args>
105
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
106
+ -> typename _CG_STL_NAMESPACE::remove_reference<
107
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
108
+
109
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
110
+ details::remove_qual<ResultType> result;
111
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
112
+ unsigned int leader_lane = 0;
113
+
114
+ if (elect_one(group, mask, leader_lane)) {
115
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
116
+ }
117
+
118
+ // Need to use low level api instead of group.shfl, because elect_one returns lane id, not group rank.
119
+ return tile::shuffle_dispatch<ResultType>::shfl(result, mask, leader_lane, 32);
120
+ }
121
+ };
122
+
123
+ template<>
124
+ struct invoke_one_impl<false> {
125
+ template<typename Group, typename Fn, typename... Args>
126
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
127
+ if (group.thread_rank() == 0) {
128
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
129
+ }
130
+ }
131
+
132
+ template<typename Group, typename Fn, typename... Args>
133
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
134
+ -> typename _CG_STL_NAMESPACE::remove_reference<
135
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
136
+
137
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
138
+ details::remove_qual<ResultType> result;
139
+
140
+ if (group.thread_rank() == 0) {
141
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
142
+ }
143
+
144
+ return group.shfl(result, 0);
145
+ }
146
+ };
147
+
148
+
149
+ }; // namespace details
150
+
151
+ template<typename Group, typename Fn, typename... Args>
152
+ _CG_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
153
+ using impl = details::invoke_one_impl<details::elect_group_supported<Group>::value>;
154
+ impl::invoke_one(group, _CG_STL_NAMESPACE::forward<Fn>(fn), _CG_STL_NAMESPACE::forward<Args>(args)...);
155
+ }
156
+
157
+ template<typename Fn, typename... Args>
158
+ _CG_QUALIFIER auto invoke_one_broadcast(const coalesced_group& group, Fn&& fn, Args&&... args)
159
+ -> typename _CG_STL_NAMESPACE::remove_reference<
160
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
161
+
162
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
163
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
164
+ "For invocables returning void invoke_one should be used instead");
165
+ using impl = details::invoke_one_impl<details::elect_group_supported<coalesced_group>::value>;
166
+ return impl::invoke_one_broadcast(group,
167
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
168
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
169
+ }
170
+
171
+ template<unsigned int Size, typename Parent, typename Fn, typename... Args>
172
+ _CG_QUALIFIER auto invoke_one_broadcast(const thread_block_tile<Size, Parent>& group, Fn&& fn, Args&&... args)
173
+ -> typename _CG_STL_NAMESPACE::remove_reference<
174
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
175
+
176
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
177
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
178
+ "For invocables returning void invoke_one should be used instead");
179
+ using impl = details::invoke_one_impl<details::elect_group_supported<thread_block_tile<Size, Parent>>::value>;
180
+ return impl::invoke_one_broadcast(group,
181
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
182
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
183
+ }
184
+
185
+ _CG_END_NAMESPACE
186
+
187
+ #endif //_CG_CPP11_FEATURES
188
+
189
+ #endif // _CG_INVOKE_H
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMORY_H_
50
+ # define _COOPERATIVE_GROUPS_MEMORY_H_
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+ namespace details {
58
+ _CG_STATIC_CONST_DECL int scratch_num_reserved_bytes = 12;
59
+
60
+ #if defined(_CG_HAS_RESERVED_SHARED)
61
+ _CG_STATIC_QUALIFIER void* reserved_shared_ptr()
62
+ {
63
+ void *ptr;
64
+ asm ("{\n\t"
65
+ " .reg .u32 start;\n\t"
66
+ " .reg .u64 extended;\n\t"
67
+ " mov.u32 start, %%reserved_smem_offset_1;\n\t"
68
+ " cvt.u64.u32 extended, start;\n\t"
69
+ " cvta.shared.u64 %0, extended;\n\t"
70
+ "}"
71
+ : "=" _CG_ASM_PTR_CONSTRAINT(ptr));
72
+ return ptr;
73
+ }
74
+ #endif
75
+
76
+ struct multi_warp_scratch {
77
+ // One barrier per possible size of the group.
78
+ _CG_STATIC_CONST_DECL unsigned int memory_barriers_count = 5;
79
+ _CG_STATIC_CONST_DECL size_t sync_memory_size = memory_barriers_count * sizeof(barrier_t);
80
+
81
+ using communication_type = unsigned long long;
82
+ _CG_STATIC_CONST_DECL size_t communication_size = sizeof(communication_type);
83
+
84
+ // Layout of the scratch space:
85
+ barrier_t barriers[memory_barriers_count];
86
+ char reserved[scratch_num_reserved_bytes]; // Reserve 12 bytes for future use
87
+ communication_type communication_memory[default_max_block_size / 32];
88
+
89
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int scratch_size_needed(unsigned int max_block_size) {
90
+ // One slot of collectives memory per warp.
91
+ return scratch_num_reserved_bytes + sync_memory_size + max_block_size / 32 * communication_size;
92
+ }
93
+
94
+ _CG_QUALIFIER void init_barriers(unsigned int thread_rank) {
95
+ if (thread_rank < memory_barriers_count) {
96
+ barriers[thread_rank] = 0;
97
+ }
98
+ }
99
+ };
100
+
101
+ #if defined(_CG_HAS_RESERVED_SHARED)
102
+ // CG can expect at least 288 bytes available in reserved shared
103
+ static_assert(sizeof(multi_warp_scratch) <= 288, "multi-warp scratch size is too large");
104
+ #endif
105
+
106
+ // Make sure the structure can fit into the user provided memory
107
+ static_assert(sizeof(multi_warp_scratch) <= multi_warp_scratch::scratch_size_needed(default_max_block_size),
108
+ "multi-warp scratch size is too large");
109
+
110
+
111
+ _CG_QUALIFIER multi_warp_scratch* get_scratch_ptr(void* user_scratch) {
112
+ void *ptr;
113
+ #if defined(_CG_HAS_RESERVED_SHARED)
114
+ ptr = reserved_shared_ptr();
115
+ #else
116
+ ptr = user_scratch;
117
+ #endif
118
+ return static_cast<multi_warp_scratch*>(ptr);
119
+
120
+ }
121
+
122
+ }
123
+
124
+ template <unsigned int MaxBlockSize = details::default_max_block_size>
125
+ struct __align__(details::multi_warp_scratch::communication_size) block_tile_memory {
126
+ private:
127
+ #if !defined(_CG_HAS_RESERVED_SHARED)
128
+ char scratch[details::multi_warp_scratch::scratch_size_needed(MaxBlockSize)];
129
+ #endif
130
+ };
131
+ #endif
132
+
133
+ _CG_END_NAMESPACE
134
+
135
+ #endif /* !_COOPERATIVE_GROUPS_MEMORY_H_ */
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_PARTITIONING_H
51
+ #define _CG_PARTITIONING_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ _CG_BEGIN_NAMESPACE
57
+
58
+ namespace details {
59
+
60
+ template <typename TyGroup>
61
+ _CG_STATIC_QUALIFIER coalesced_group _binary_partition(const TyGroup &tile, bool pred) {
62
+ const unsigned int fullMask = ~0u;
63
+
64
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
65
+ unsigned int predMask = pred ? 0 : fullMask;
66
+ unsigned int setMask = __ballot_sync(thisMask, pred);
67
+
68
+ if (setMask == thisMask || setMask == 0) {
69
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(thisMask);
70
+ _coalesced_group_data_access::modify_meta_group(subTile, 0, 1);
71
+ return subTile;
72
+ }
73
+ else {
74
+ unsigned int subMask = thisMask & (setMask ^ predMask);
75
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
76
+ _coalesced_group_data_access::modify_meta_group(subTile, pred, 2);
77
+ return subTile;
78
+ }
79
+ }
80
+
81
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
82
+ template <typename TyGroup, typename TyPredicate>
83
+ _CG_STATIC_QUALIFIER coalesced_group _labeled_partition(const TyGroup &tile, TyPredicate pred) {
84
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
85
+ unsigned int thisBias = __ffs(thisMask) - 1; // Subtract 1 to index properly from [1-32]
86
+ unsigned int subMask = __match_any_sync(thisMask, pred);
87
+
88
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
89
+
90
+ int leaderLaneId = subTile.shfl(details::laneid(), 0);
91
+
92
+ bool isLeader = !subTile.thread_rank();
93
+ unsigned int leaderMask = __ballot_sync(thisMask, isLeader);
94
+ unsigned int tileRank = __fns(leaderMask, leaderLaneId, 0) - thisBias;
95
+
96
+ _coalesced_group_data_access::modify_meta_group(subTile, tileRank, __popc(leaderMask));
97
+
98
+ return subTile;
99
+ }
100
+ #endif
101
+ }; // namespace details
102
+
103
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const coalesced_group &tile, bool pred) {
104
+ return details::_binary_partition(tile, pred);
105
+ }
106
+
107
+ template <unsigned int Size, typename ParentT>
108
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const thread_block_tile<Size, ParentT> &tile, bool pred) {
109
+ #ifdef _CG_CPP11_FEATURES
110
+ static_assert(Size <= 32, "Binary partition is available only for tiles of size smaller or equal to 32");
111
+ #endif
112
+ return details::_binary_partition(tile, pred);
113
+ }
114
+
115
+
116
+ #if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
117
+ template <typename TyPredicate>
118
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const coalesced_group &tile, TyPredicate pred) {
119
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value, "labeled_partition predicate must be an integral type");
120
+ return details::_labeled_partition(tile, pred);
121
+ }
122
+
123
+ template <typename TyPredicate, unsigned int Size, typename ParentT>
124
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const thread_block_tile<Size, ParentT> &tile, TyPredicate pred) {
125
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value, "labeled_partition predicate must be an integral type");
126
+ static_assert(Size <= 32, "Labeled partition is available only for tiles of size smaller or equal to 32");
127
+ return details::_labeled_partition(tile, pred);
128
+ }
129
+ #endif
130
+
131
+ _CG_END_NAMESPACE
132
+
133
+ #endif // _CG_PARTITIONING_H
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h ADDED
@@ -0,0 +1,429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_REDUCE_H_
50
+ #define _CG_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "coalesced_reduce.h"
55
+ #include "functional.h"
56
+ #include "cooperative_groups.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <class Ty>
63
+ using _redux_is_add_supported = _CG_STL_NAMESPACE::integral_constant<
64
+ bool,
65
+ _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) <= 4)>;
66
+
67
+ template <class Ty>
68
+ using redux_is_add_supported = _redux_is_add_supported<Ty>;
69
+
70
+ // A specialization for 64 bit logical operations is possible
71
+ // but for now only accelerate 32 bit bitwise ops
72
+ template <class Ty>
73
+ using redux_is_logical_supported = redux_is_add_supported<Ty>;
74
+
75
+ // Base operator support case
76
+ template <class TyOp, class Ty> struct _redux_op_supported : public _CG_STL_NAMESPACE::false_type {};
77
+ #ifdef _CG_HAS_OP_REDUX
78
+ template <class Ty> struct _redux_op_supported<cooperative_groups::plus<Ty>, Ty> : public redux_is_add_supported<Ty> {};
79
+ template <class Ty> struct _redux_op_supported<cooperative_groups::less<Ty>, Ty> : public redux_is_add_supported<Ty> {};
80
+ template <class Ty> struct _redux_op_supported<cooperative_groups::greater<Ty>, Ty> : public redux_is_add_supported<Ty> {};
81
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_and<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
82
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_or<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
83
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_xor<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
84
+ #endif
85
+
86
+ template <class Ty, template <class> class TyOp>
87
+ using redux_op_supported = _redux_op_supported<
88
+ typename details::remove_qual<TyOp<Ty>>,
89
+ Ty>;
90
+
91
+ // Groups smaller than 16 actually have worse performance characteristics when used with redux
92
+ // tiles of size 16 and 32 perform the same or better and have better code generation profiles
93
+ template <class TyGroup> struct _redux_group_optimized : public _CG_STL_NAMESPACE::false_type {};
94
+
95
+ template <unsigned int Sz, typename TyPar>
96
+ struct _redux_group_optimized<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
97
+ bool,
98
+ (Sz >= 16)> {};
99
+ template <unsigned int Sz, typename TyPar>
100
+ struct _redux_group_optimized<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
101
+ bool,
102
+ (Sz >= 16)> {};
103
+ template <>
104
+ struct _redux_group_optimized<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
105
+
106
+ template <typename TyGroup>
107
+ using redux_group_optimized = _redux_group_optimized<details::remove_qual<TyGroup>>;
108
+
109
+ template <template <class> class TyOp>
110
+ _CG_STATIC_QUALIFIER int pick_redux(int mask, int val);
111
+ template <template <class> class TyOp>
112
+ _CG_STATIC_QUALIFIER unsigned int pick_redux(int mask, unsigned int val);
113
+
114
+ #ifdef _CG_HAS_OP_REDUX
115
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::plus>(int mask, int val) {
116
+ return __reduce_add_sync(mask, val);
117
+ }
118
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::less>(int mask, int val) {
119
+ return __reduce_min_sync(mask, val);
120
+ }
121
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::greater>(int mask, int val) {
122
+ return __reduce_max_sync(mask, val);
123
+ }
124
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_and>(int mask, int val) {
125
+ return __reduce_and_sync(mask, val);
126
+ }
127
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_xor>(int mask, int val) {
128
+ return __reduce_xor_sync(mask, val);
129
+ }
130
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_or>(int mask, int val) {
131
+ return __reduce_or_sync(mask, val);
132
+ }
133
+
134
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::plus>(int mask, unsigned int val) {
135
+ return __reduce_add_sync(mask, val);
136
+ }
137
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::less>(int mask, unsigned int val) {
138
+ return __reduce_min_sync(mask, val);
139
+ }
140
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::greater>(int mask, unsigned int val) {
141
+ return __reduce_max_sync(mask, val);
142
+ }
143
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_and>(int mask, unsigned int val) {
144
+ return __reduce_and_sync(mask, val);
145
+ }
146
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_xor>(int mask, unsigned int val) {
147
+ return __reduce_xor_sync(mask, val);
148
+ }
149
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_or>(int mask, unsigned int val) {
150
+ return __reduce_or_sync(mask, val);
151
+ }
152
+ #endif
153
+
154
+
155
+ template <typename TyVal, bool = _CG_STL_NAMESPACE::is_unsigned<TyVal>::value>
156
+ struct _accelerated_op;
157
+
158
+ // Signed type redux intrinsic dispatch
159
+ template <typename TyVal>
160
+ struct _accelerated_op<TyVal, false> {
161
+ template <template <class> class TyOp>
162
+ _CG_STATIC_QUALIFIER TyVal redux(int mask, TyVal val) {
163
+ return static_cast<TyVal>(pick_redux<TyOp>(mask, static_cast<int>(val)));
164
+ }
165
+ };
166
+
167
+ // Unsigned type redux intrinsic dispatch
168
+ template <typename TyVal>
169
+ struct _accelerated_op<TyVal, true> {
170
+ template <template <class> class TyOp>
171
+ _CG_STATIC_QUALIFIER TyVal redux(int mask, TyVal val) {
172
+ return static_cast<TyVal>(pick_redux<TyOp>(mask, static_cast<unsigned int>(val)));
173
+ }
174
+ };
175
+
176
+ template <typename TyVal>
177
+ using accelerated_op = _accelerated_op<TyVal>;
178
+
179
+
180
+ template <typename TyVal, typename TyFnInput, typename TyGroup>
181
+ class _redux_dispatch {
182
+ template <class Ty, template <class> class TyOp>
183
+ using _redux_is_usable = _CG_STL_NAMESPACE::integral_constant<bool,
184
+ redux_op_supported<Ty, TyOp>::value &&
185
+ redux_group_optimized<TyGroup>::value>;
186
+
187
+ template <class Ty, template <class> class TyOp>
188
+ using redux_is_usable = typename _CG_STL_NAMESPACE::enable_if<_redux_is_usable<Ty, TyOp>::value, void>::type*;
189
+
190
+ template <class Ty, template <class> class TyOp>
191
+ using redux_is_not_usable = typename _CG_STL_NAMESPACE::enable_if<!_redux_is_usable<Ty, TyOp>::value, void>::type*;
192
+
193
+ public:
194
+ // Dispatch to redux if the combination of op and args are supported
195
+ template<
196
+ template <class> class TyOp,
197
+ redux_is_usable<TyFnInput, TyOp> = nullptr>
198
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
199
+ // Retrieve the mask for the group and dispatch to redux
200
+ return accelerated_op<TyFnInput>::template redux<TyOp>(_coalesced_group_data_access::get_mask(group), _CG_STL_NAMESPACE::forward<TyVal>(val));
201
+ }
202
+
203
+ template<
204
+ template <class> class TyOp,
205
+ redux_is_usable<TyFnInput, TyOp> = nullptr>
206
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>& op) -> decltype(op(val, val)) {
207
+ // Retrieve the mask for the group and dispatch to redux
208
+ return accelerated_op<TyFnInput>::template redux<TyOp>(_coalesced_group_data_access::get_mask(group), _CG_STL_NAMESPACE::forward<TyVal>(val));
209
+ }
210
+
211
+ // Fallback shuffle sync reduction
212
+ template <
213
+ template <class> class TyOp,
214
+ redux_is_not_usable<TyFnInput, TyOp> = nullptr>
215
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
216
+ //Dispatch to fallback shuffle sync accelerated reduction
217
+ return coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
218
+ }
219
+
220
+ };
221
+
222
+ // Group support for reduce.
223
+ template <class TyGroup> struct _reduce_group_supported : public _CG_STL_NAMESPACE::false_type {};
224
+
225
+ template <unsigned int Sz, typename TyPar>
226
+ struct _reduce_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
227
+ template <unsigned int Sz, typename TyPar>
228
+ struct _reduce_group_supported<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
229
+ template <>
230
+ struct _reduce_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
231
+
232
+ template <typename TyGroup>
233
+ using reduce_group_supported = _reduce_group_supported<details::remove_qual<TyGroup>>;
234
+
235
+ template <typename TyVal, typename TyFnInput, template <class> class TyOp, typename TyGroup>
236
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
237
+ static_assert(details::is_op_type_same<TyFnInput, TyVal>::value, "Operator and argument types differ");
238
+
239
+ using dispatch = details::_redux_dispatch<TyVal, TyFnInput, TyGroup>;
240
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
241
+ }
242
+
243
+ template <typename TyVal, typename TyFnInput, template <class> class TyOp, typename TyGroup>
244
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>& op) -> decltype(op(val, val)) {
245
+ static_assert(details::is_op_type_same<TyFnInput, TyVal>::value, "Operator and argument types differ");
246
+
247
+ using dispatch = details::_redux_dispatch<TyVal, TyFnInput, TyGroup>;
248
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
249
+ }
250
+
251
+
252
+ template <typename TyVal, typename TyOp, typename TyGroup>
253
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
254
+ return details::coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
255
+ }
256
+
257
+ template <unsigned int GroupId>
258
+ struct tile_reduce_dispatch;
259
+
260
+ template <>
261
+ struct tile_reduce_dispatch<details::coalesced_group_id> {
262
+ template <typename TyGroup, typename TyVal, typename TyFn>
263
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
264
+ return details::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
265
+ }
266
+ };
267
+
268
+ #if defined(_CG_CPP11_FEATURES)
269
+ template <>
270
+ struct tile_reduce_dispatch<details::multi_tile_group_id> {
271
+ template <unsigned int Size, typename ParentT, typename TyVal, typename TyFn>
272
+ _CG_STATIC_QUALIFIER auto reduce(const thread_block_tile<Size, ParentT>& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
273
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
274
+ using TyRet = details::remove_qual<TyVal>;
275
+ const unsigned int num_warps = Size / 32;
276
+
277
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
278
+ *warp_scratch_location =
279
+ details::reduce(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
280
+ };
281
+ auto inter_warp_lambda =
282
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
283
+ *thread_scratch_location =
284
+ details::reduce(subwarp, *thread_scratch_location, _CG_STL_NAMESPACE::forward<TyFn>(op));
285
+ };
286
+ return details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
287
+ }
288
+ };
289
+
290
+ template <unsigned int GroupId>
291
+ struct tile_async_reduce_dispatch;
292
+
293
+ template <>
294
+ struct tile_async_reduce_dispatch<details::coalesced_group_id> {
295
+ template <unsigned int TySize, typename ParentT, typename TyDst, typename TyVal, typename TyFn, typename TyResHandler>
296
+ _CG_STATIC_QUALIFIER void reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group, TyDst& dst, TyVal&& val, TyFn&& op, TyResHandler& res_handler) {
297
+ // Do regular, in group reduction
298
+ auto result = details::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
299
+
300
+ // One thread stores/updates the destination
301
+ if (group.thread_rank() == 0) {
302
+ res_handler(result);
303
+ }
304
+ }
305
+ template <typename TyDst, typename TyVal, typename TyFn, typename TyResHandler>
306
+ _CG_STATIC_QUALIFIER void reduce(const coalesced_group& group, TyDst& dst, TyVal&& val, TyFn&& op, TyResHandler& res_handler) {
307
+ // Do in group reduction to the last thread
308
+ auto result = details::coalesced_reduce_to_one(group, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
309
+
310
+ // One thread stores/updates the destination
311
+ if (group.thread_rank() == group.size() - 1) {
312
+ res_handler(result);
313
+ }
314
+ }
315
+ };
316
+
317
+ template <>
318
+ struct tile_async_reduce_dispatch<details::multi_tile_group_id> {
319
+ template <unsigned int TySize, typename ParentT, typename TyDst, typename TyInputVal, typename TyFn, typename TyResHandler>
320
+ _CG_STATIC_QUALIFIER void reduce(const thread_block_tile<TySize, ParentT>& group, TyDst& dst, TyInputVal&& val, TyFn&& op, TyResHandler& res_handler) {
321
+ using TyVal = remove_qual<TyInputVal>;
322
+ const unsigned int num_warps = TySize / 32;
323
+ details::barrier_t* sync_location = multi_warp_sync_location_getter(group);
324
+ auto warp_scratch_location = multi_warp_scratch_location_getter<TyVal>(group, group.thread_rank() / 32);
325
+
326
+ // Do in warp reduce
327
+ auto warp = details::tiled_partition_internal<32, thread_block_tile<TySize, ParentT>>();
328
+ *warp_scratch_location = details::reduce(warp, _CG_STL_NAMESPACE::forward<TyInputVal>(val), op);
329
+
330
+ // Tile of size num_warps from the last warp to arrive does final reduction step
331
+ if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), num_warps)) {
332
+ auto subwarp = details::tiled_partition_internal<num_warps, decltype(warp)>();
333
+ if (subwarp.meta_group_rank() == 0) {
334
+ auto thread_scratch_location = multi_warp_scratch_location_getter<TyVal>(group, subwarp.thread_rank());
335
+ auto thread_val = *thread_scratch_location;
336
+ // Release other warps, we read their contribution already.
337
+ subwarp.sync();
338
+ details::sync_warps_release(sync_location, subwarp.thread_rank() == 0, details::cta::thread_rank(), num_warps);
339
+ TyVal result = details::reduce(subwarp, thread_val, op);
340
+ // One thread stores the result or updates the atomic
341
+ if (subwarp.thread_rank() == 0) {
342
+ res_handler(result);
343
+ }
344
+ }
345
+ warp.sync();
346
+ }
347
+ }
348
+ };
349
+ #endif
350
+
351
+ template <typename TyGroup, typename TyInputVal, typename TyRetVal>
352
+ _CG_QUALIFIER void check_reduce_params() {
353
+ static_assert(details::is_op_type_same<TyInputVal, TyRetVal>::value, "Operator input and output types differ");
354
+ static_assert(details::reduce_group_supported<TyGroup>::value, "This group does not exclusively represent a tile");
355
+ };
356
+
357
+ template <typename TyGroup, typename TyDstVal, typename TyInputVal, typename TyRetVal>
358
+ _CG_QUALIFIER void check_async_reduce_params() {
359
+ check_reduce_params<TyGroup, TyInputVal, TyRetVal>();
360
+ static_assert(details::is_op_type_same<TyDstVal, TyInputVal>::value, "Destination and input types differ");
361
+ }
362
+ } // details
363
+
364
+ template <typename TyGroup, typename TyVal, typename TyFn>
365
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
366
+ details::check_reduce_params<TyGroup, details::remove_qual<TyVal>, decltype(op(val, val))>();
367
+
368
+ using dispatch = details::tile_reduce_dispatch<TyGroup::_group_id>;
369
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
370
+ }
371
+
372
+ #if defined(_CG_CPP11_FEATURES)
373
+
374
+ # if defined(_CG_HAS_STL_ATOMICS)
375
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
376
+ void _CG_QUALIFIER reduce_update_async(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
377
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
378
+ auto update_lambda = [&] (TyVal& result) {
379
+ details::atomic_update(dst, result, op);
380
+ };
381
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
382
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), update_lambda);
383
+ }
384
+
385
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
386
+ void _CG_QUALIFIER reduce_update_async(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
387
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
388
+ auto update_lambda = [&] (TyVal& result) {
389
+ details::atomic_update(dst, result, op);
390
+ };
391
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
392
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), update_lambda);
393
+ }
394
+
395
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
396
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
397
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
398
+ auto store_lambda = [&] (TyVal& result) {
399
+ details::atomic_store(dst, result);
400
+ };
401
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
402
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
403
+ }
404
+
405
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
406
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
407
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
408
+ auto store_lambda = [&] (TyVal& result) {
409
+ details::atomic_store(dst, result);
410
+ };
411
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
412
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
413
+ }
414
+ # endif
415
+
416
+ template<typename TyGroup, typename TyVal, typename TyInputVal, typename TyFn>
417
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, TyVal* dst, TyInputVal&& val, TyFn&& op) {
418
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
419
+ auto store_lambda = [&] (TyVal& result) {
420
+ *dst = result;
421
+ };
422
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
423
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
424
+ }
425
+ #endif
426
+
427
+ _CG_END_NAMESPACE
428
+
429
+ #endif // _CG_REDUCE_H_
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/scan.h ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_SCAN_H_
50
+ #define _CG_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "functional.h"
55
+ #include "coalesced_scan.h"
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+
61
+ // Group support for scan.
62
+ template <class TyGroup> struct _scan_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _scan_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
66
+ template <unsigned int Sz, typename TyPar>
67
+ struct _scan_group_supported<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
68
+ template <>
69
+ struct _scan_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
70
+
71
+ template <typename TyGroup>
72
+ using scan_group_supported = _scan_group_supported<details::remove_qual<TyGroup>>;
73
+
74
+ template <bool IsIntegralPlus>
75
+ struct integral_optimized_scan;
76
+
77
+ enum class ScanType { exclusive, inclusive };
78
+
79
+ template <unsigned int GroupId, ScanType TyScan>
80
+ struct scan_dispatch;
81
+
82
+ template <ScanType TyScan>
83
+ struct scan_dispatch<details::coalesced_group_id, TyScan> {
84
+ template <typename TyGroup, typename TyVal, typename TyFn>
85
+ _CG_STATIC_QUALIFIER auto scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
86
+ auto scan_result = coalesced_inclusive_scan(group, val, op);
87
+ if (TyScan == ScanType::exclusive) {
88
+ scan_result = convert_inclusive_to_exclusive(group,
89
+ scan_result,
90
+ _CG_STL_NAMESPACE::forward<TyVal>(val),
91
+ _CG_STL_NAMESPACE::forward<TyFn>(op));
92
+ }
93
+ return scan_result;
94
+ }
95
+ };
96
+
97
+ #if defined(_CG_CPP11_FEATURES)
98
+ template <ScanType TyScan>
99
+ struct scan_dispatch<details::multi_tile_group_id, TyScan> {
100
+ template <unsigned int Size, typename ParentT, typename TyVal, typename TyFn>
101
+ _CG_STATIC_QUALIFIER auto scan(const thread_block_tile<Size, ParentT>& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
102
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
103
+ using TyRet = details::remove_qual<TyVal>;
104
+ const unsigned int num_warps = Size / 32;
105
+ // In warp scan result, calculated in warp_lambda
106
+ TyRet warp_scan;
107
+
108
+ // In warp scan, put sum in the warp_scratch_location
109
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
110
+ warp_scan =
111
+ details::coalesced_inclusive_scan(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
112
+ if (warp.thread_rank() + 1 == warp.size()) {
113
+ *warp_scratch_location = warp_scan;
114
+ }
115
+ if (TyScan == ScanType::exclusive) {
116
+ warp_scan = warp.shfl_up(warp_scan, 1);
117
+ }
118
+ };
119
+
120
+ // Tile of size num_warps performing the final scan part (exclusive scan of warp sums), other threads will add it
121
+ // to its in-warp scan result
122
+ auto inter_warp_lambda =
123
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
124
+ auto thread_val = *thread_scratch_location;
125
+ auto result = coalesced_inclusive_scan(subwarp, thread_val, op);
126
+ *thread_scratch_location = convert_inclusive_to_exclusive(subwarp, result, thread_val, op);
127
+ };
128
+
129
+ TyRet previous_warps_sum = details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
130
+ if (TyScan == ScanType::exclusive && warpType::thread_rank() == 0) {
131
+ return previous_warps_sum;
132
+ }
133
+ if (warpType::meta_group_rank() == 0) {
134
+ return warp_scan;
135
+ }
136
+ else {
137
+ return op(warp_scan, previous_warps_sum);
138
+ }
139
+ }
140
+ };
141
+
142
+ #if defined(_CG_HAS_STL_ATOMICS)
143
+ template <unsigned int GroupId, ScanType TyScan>
144
+ struct scan_update_dispatch;
145
+
146
+ template <ScanType TyScan>
147
+ struct scan_update_dispatch<details::coalesced_group_id, TyScan> {
148
+ template <typename TyGroup, typename TyAtomic, typename TyVal, typename TyFn>
149
+ _CG_STATIC_QUALIFIER auto scan(const TyGroup& group, TyAtomic& dst, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
150
+ details::remove_qual<TyVal> old;
151
+
152
+ // Do regular in group scan
153
+ auto scan_result = details::coalesced_inclusive_scan(group, val, op);
154
+
155
+ // Last thread updates the atomic and distributes its old value to other threads
156
+ if (group.thread_rank() == group.size() - 1) {
157
+ old = atomic_update(dst, scan_result, _CG_STL_NAMESPACE::forward<TyFn>(op));
158
+ }
159
+ old = group.shfl(old, group.size() - 1);
160
+ if (TyScan == ScanType::exclusive) {
161
+ scan_result = convert_inclusive_to_exclusive(group, scan_result, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
162
+ }
163
+ scan_result = op(old, scan_result);
164
+ return scan_result;
165
+ }
166
+ };
167
+
168
+ template <ScanType TyScan>
169
+ struct scan_update_dispatch<details::multi_tile_group_id, TyScan> {
170
+ template <unsigned int Size, typename ParentT, typename TyAtomic, typename TyVal, typename TyFn>
171
+ _CG_STATIC_QUALIFIER auto scan(const thread_block_tile<Size, ParentT>& group, TyAtomic& dst, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
172
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
173
+ using TyRet = details::remove_qual<TyVal>;
174
+ const unsigned int num_warps = Size / 32;
175
+ // In warp scan result, calculated in warp_lambda
176
+ TyRet warp_scan;
177
+
178
+ // In warp scan, put sum in the warp_scratch_location
179
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
180
+ warp_scan =
181
+ details::coalesced_inclusive_scan(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
182
+ if (warp.thread_rank() + 1 == warp.size()) {
183
+ *warp_scratch_location = warp_scan;
184
+ }
185
+ if (TyScan == ScanType::exclusive) {
186
+ warp_scan = warp.shfl_up(warp_scan, 1);
187
+ }
188
+ };
189
+
190
+ // Tile of size num_warps performing the final scan part (exclusive scan of warp sums), other threads will add it
191
+ // to its in-warp scan result
192
+ auto inter_warp_lambda =
193
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
194
+ auto thread_val = *thread_scratch_location;
195
+ auto scan_result = details::coalesced_inclusive_scan(subwarp, thread_val, op);
196
+ TyRet offset;
197
+ // Single thread does the atomic update with sum of all contributions and reads the old value.
198
+ if (subwarp.thread_rank() == subwarp.size() - 1) {
199
+ offset = details::atomic_update(dst, scan_result, op);
200
+ }
201
+ offset = subwarp.shfl(offset, subwarp.size() - 1);
202
+ scan_result = convert_inclusive_to_exclusive(subwarp, scan_result, thread_val, op);
203
+ // Add offset read from the atomic to the scanned warp sum.
204
+ // Skipping first thread, since it got defautly constructed value from the conversion,
205
+ // it should just return the offset received from the thread that did the atomic update.
206
+ if (subwarp.thread_rank() != 0) {
207
+ offset = op(scan_result, offset);
208
+ }
209
+ *thread_scratch_location = offset;
210
+ };
211
+
212
+ TyRet previous_warps_sum = details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
213
+ if (TyScan == ScanType::exclusive && warpType::thread_rank() == 0) {
214
+ return previous_warps_sum;
215
+ }
216
+ return op(warp_scan, previous_warps_sum);
217
+ }
218
+ };
219
+ #endif
220
+ #endif
221
+
222
+ template <typename TyGroup, typename TyInputVal, typename TyRetVal>
223
+ _CG_QUALIFIER void check_scan_params() {
224
+ static_assert(details::is_op_type_same<TyInputVal, TyRetVal>::value, "Operator input and output types differ");
225
+ static_assert(details::scan_group_supported<TyGroup>::value, "This group does not exclusively represent a tile");
226
+ }
227
+
228
+ #if defined(_CG_HAS_STL_ATOMICS)
229
+ template <typename TyGroup, typename TyDstVal, typename TyInputVal, typename TyRetVal>
230
+ _CG_QUALIFIER void check_scan_update_params() {
231
+ check_scan_params<TyGroup, TyInputVal, TyRetVal>();
232
+ static_assert(details::is_op_type_same<TyDstVal, TyInputVal>::value, "Destination and input types differ");
233
+ }
234
+ #endif
235
+
236
+ } // details
237
+
238
+ template <typename TyGroup, typename TyVal, typename TyFn>
239
+ _CG_QUALIFIER auto inclusive_scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
240
+ details::check_scan_params<TyGroup, TyVal, decltype(op(val, val))>();
241
+
242
+ using dispatch = details::scan_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
243
+ return dispatch::scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
244
+ }
245
+
246
+ template <typename TyGroup, typename TyVal>
247
+ _CG_QUALIFIER details::remove_qual<TyVal> inclusive_scan(const TyGroup& group, TyVal&& val) {
248
+ return inclusive_scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), cooperative_groups::plus<details::remove_qual<TyVal>>());
249
+ }
250
+
251
+ template <typename TyGroup, typename TyVal, typename TyFn>
252
+ _CG_QUALIFIER auto exclusive_scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
253
+ details::check_scan_params<TyGroup, TyVal, decltype(op(val, val))>();
254
+
255
+ using dispatch = details::scan_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
256
+ return dispatch::scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
257
+ }
258
+
259
+ template <typename TyGroup, typename TyVal>
260
+ _CG_QUALIFIER details::remove_qual<TyVal> exclusive_scan(const TyGroup& group, TyVal&& val) {
261
+ return exclusive_scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), cooperative_groups::plus<details::remove_qual<TyVal>>());
262
+ }
263
+
264
+ #if defined(_CG_HAS_STL_ATOMICS)
265
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
266
+ _CG_QUALIFIER auto inclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
267
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
268
+
269
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
270
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
271
+ }
272
+
273
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
274
+ _CG_QUALIFIER TyVal inclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco> & dst, TyInputVal&& val) {
275
+ return inclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
276
+ }
277
+
278
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
279
+ _CG_QUALIFIER auto exclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
280
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
281
+
282
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
283
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
284
+ }
285
+
286
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
287
+ _CG_QUALIFIER TyVal exclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val) {
288
+ return exclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
289
+ }
290
+
291
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
292
+ _CG_QUALIFIER auto inclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
293
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
294
+
295
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
296
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
297
+ }
298
+
299
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
300
+ _CG_QUALIFIER TyVal inclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco> & dst, TyInputVal&& val) {
301
+ return inclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
302
+ }
303
+
304
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
305
+ _CG_QUALIFIER auto exclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
306
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
307
+
308
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
309
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
310
+ }
311
+
312
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
313
+ _CG_QUALIFIER TyVal exclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val) {
314
+ return exclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
315
+ }
316
+ #endif
317
+
318
+ _CG_END_NAMESPACE
319
+
320
+ #endif // _CG_SCAN_H_
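For orientation, here is a minimal usage sketch of the scan API declared above (an editorial illustration, not part of the diff): each thread of a 32-wide tile contributes one value and receives its prefix sum. The kernel name and the assumption that in/out are sized to the full grid are mine.

#include <cooperative_groups.h>
#include <cooperative_groups/scan.h>
namespace cg = cooperative_groups;

__global__ void prefix_sum_demo(const int* in, int* out) {
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    int v = in[i];
    // Inclusive scan with the default plus op: lane k of each tile receives
    // the sum of that tile's first k+1 inputs.
    out[i] = cg::inclusive_scan(tile, v);
    // The explicit-operator overload declared above is equivalent here:
    //   out[i] = cg::inclusive_scan(tile, v, cg::plus<int>());
}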
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/sync.h ADDED
@@ -0,0 +1,267 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_GRID_H
50
+ #define _CG_GRID_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details
57
+ {
58
+
59
+ typedef unsigned int barrier_t;
60
+
61
+ _CG_STATIC_QUALIFIER bool bar_has_flipped(unsigned int old_arrive, unsigned int current_arrive) {
62
+ return (((old_arrive ^ current_arrive) & 0x80000000) != 0);
63
+ }
64
+
65
+ _CG_STATIC_QUALIFIER void sync_grids(unsigned int expected, volatile barrier_t *arrived) {
66
+ bool cta_master = (threadIdx.x + threadIdx.y + threadIdx.z == 0);
67
+ bool gpu_master = (blockIdx.x + blockIdx.y + blockIdx.z == 0);
68
+
69
+ __barrier_sync(0);
70
+
71
+ if (cta_master) {
72
+ unsigned int oldArrive;
73
+ unsigned int nb = 1;
74
+ if (gpu_master) {
75
+ nb = 0x80000000 - (expected - 1);
76
+ }
77
+
78
+ #if __CUDA_ARCH__ < 700
79
+ // Fence; barrier update; volatile polling; fence
80
+ __threadfence();
81
+
82
+ oldArrive = atomicAdd((unsigned int*)arrived, nb);
83
+
84
+ while (!bar_has_flipped(oldArrive, *arrived));
85
+
86
+ __threadfence();
87
+ #else
88
+ // Barrier update with release; polling with acquire
89
+ asm volatile("atom.add.release.gpu.u32 %0,[%1],%2;" : "=r"(oldArrive) : _CG_ASM_PTR_CONSTRAINT((unsigned int*)arrived), "r"(nb) : "memory");
90
+
91
+ unsigned int current_arrive;
92
+ do {
93
+ asm volatile("ld.acquire.gpu.u32 %0,[%1];" : "=r"(current_arrive) : _CG_ASM_PTR_CONSTRAINT((unsigned int *)arrived) : "memory");
94
+ } while (!bar_has_flipped(oldArrive, current_arrive));
95
+ #endif
96
+ }
97
+
98
+ __barrier_sync(0);
99
+ }
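A quick arithmetic check of the flip trick used above (editorial note, not part of the header): the gpu_master adds nb = 0x80000000 - (expected - 1) while each of the other expected - 1 blocks adds 1, so every full round contributes exactly 0x80000000 and bit 31 flips once per barrier generation, which is what bar_has_flipped polls for.

// Hypothetical standalone check of that arithmetic.
#include <cassert>

int main() {
    unsigned int expected = 120;                         // e.g. 120 resident blocks
    unsigned int total = (expected - 1) * 1u             // non-master arrivals
                       + (0x80000000u - (expected - 1)); // master arrival
    assert(total == 0x80000000u);                        // exactly one bit-31 flip per round
    return 0;
}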
100
+
101
+ /* - Multi warp groups synchronization routines - */
102
+
103
+ // Need both acquire and release for the last warp, since it won't be able to acquire with red.and
104
+ _CG_STATIC_QUALIFIER unsigned int atom_or_acq_rel_cta(unsigned int *addr, unsigned int val) {
105
+ unsigned int old;
106
+ #if __CUDA_ARCH__ < 700
107
+ __threadfence_block();
108
+ old = atomicOr(addr, val);
109
+ #else
110
+ asm volatile("atom.or.acq_rel.cta.b32 %0,[%1],%2;" : "=r"(old) : _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
111
+ #endif
112
+ return old;
113
+ }
114
+
115
+ // Special case where the barrier is arrived at, but not waited on
116
+ _CG_STATIC_QUALIFIER void red_or_release_cta(unsigned int *addr, unsigned int val) {
117
+ #if __CUDA_ARCH__ < 700
118
+ __threadfence_block();
119
+ atomicOr(addr, val);
120
+ #else
121
+ asm volatile("red.or.release.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
122
+ #endif
123
+ }
124
+
125
+ // Usually called by the last arriving warp to release the other warps; can be relaxed, since the or was already acq_rel
126
+ _CG_STATIC_QUALIFIER void red_and_relaxed_cta(unsigned int *addr, unsigned int val) {
127
+ #if __CUDA_ARCH__ < 700
128
+ atomicAnd(addr, val);
129
+ #else
130
+ asm volatile("red.and.relaxed.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
131
+ #endif
132
+ }
133
+
134
+ // Special case of release, where the last warp was doing extra work before releasing the others; needs to be a release
135
+ // to ensure that extra work is visible
136
+ _CG_STATIC_QUALIFIER void red_and_release_cta(unsigned int *addr, unsigned int val) {
137
+ #if __CUDA_ARCH__ < 700
138
+ __threadfence_block();
139
+ atomicAnd(addr, val);
140
+ #else
141
+ asm volatile("red.and.release.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
142
+ #endif
143
+ }
144
+
145
+ // Read the barrier with acquire to ensure all memory operations following the sync are correctly ordered after it is released
146
+ _CG_STATIC_QUALIFIER unsigned int ld_acquire_cta(unsigned int *addr) {
147
+ unsigned int val;
148
+ #if __CUDA_ARCH__ < 700
149
+ val = *((volatile unsigned int*) addr);
150
+ __threadfence_block();
151
+ #else
152
+ asm volatile("ld.acquire.cta.u32 %0,[%1];" : "=r"(val) : _CG_ASM_PTR_CONSTRAINT(addr) : "memory");
153
+ #endif
154
+ return val;
155
+ }
156
+
157
+ // Get the synchronization bit mask of my thread_block_tile of size num_warps. Thread ranks 0..31 have the first bit assigned to them,
158
+ // thread ranks 32..63 the second, etc.
159
+ // Bit masks are unique for each group; groups of the same size have the same number of bits set, but at different positions.
160
+ _CG_STATIC_QUALIFIER unsigned int get_group_mask(unsigned int thread_rank, unsigned int num_warps) {
161
+ return num_warps == 32 ? ~0 : ((1 << num_warps) - 1) << (num_warps * (thread_rank / (num_warps * 32)));
162
+ }
163
+
164
+ _CG_STATIC_QUALIFIER void barrier_wait(barrier_t *arrived, unsigned int warp_bit) {
165
+ while(ld_acquire_cta(arrived) & warp_bit);
166
+ }
167
+
168
+ // Default blocking sync.
169
+ _CG_STATIC_QUALIFIER void sync_warps(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
170
+ unsigned int warp_id = thread_rank / 32;
171
+ bool warp_master = (thread_rank % 32 == 0);
172
+ unsigned int warp_bit = 1 << warp_id;
173
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
174
+
175
+ __syncwarp(0xFFFFFFFF);
176
+
177
+ if (warp_master) {
178
+ unsigned int old = atom_or_acq_rel_cta(arrived, warp_bit);
179
+ if (((old | warp_bit) & group_mask) == group_mask) {
180
+ red_and_relaxed_cta(arrived, ~group_mask);
181
+ }
182
+ else {
183
+ barrier_wait(arrived, warp_bit);
184
+ }
185
+ }
186
+
187
+ __syncwarp(0xFFFFFFFF);
188
+ }
189
+
190
+ // Blocking sync, except that the last arriving warp, which releases the other warps, returns to do other work first.
191
+ // Warp returning true from this function needs to call sync_warps_release.
192
+ _CG_STATIC_QUALIFIER bool sync_warps_last_releases(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
193
+ unsigned int warp_id = thread_rank / 32;
194
+ bool warp_master = (thread_rank % 32 == 0);
195
+ unsigned int warp_bit = 1 << warp_id;
196
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
197
+
198
+ __syncwarp(0xFFFFFFFF);
199
+
200
+ unsigned int old = 0;
201
+ if (warp_master) {
202
+ old = atom_or_acq_rel_cta(arrived, warp_bit);
203
+ }
204
+ old = __shfl_sync(0xFFFFFFFF, old, 0);
205
+ if (((old | warp_bit) & group_mask) == group_mask) {
206
+ return true;
207
+ }
208
+ barrier_wait(arrived, warp_bit);
209
+
210
+ return false;
211
+ }
212
+
213
+ // Release my group from the barrier.
214
+ _CG_STATIC_QUALIFIER void sync_warps_release(barrier_t *arrived, bool is_master, unsigned int thread_rank, unsigned int num_warps) {
215
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
216
+ if (is_master) {
217
+ red_and_release_cta(arrived, ~group_mask);
218
+ }
219
+ }
220
+
221
+ // Arrive at my group barrier, but don't block on or release the barrier, even if everyone arrives.
222
+ // sync_warps_release needs to be called by some warp after this one to reset the barrier.
223
+ _CG_STATIC_QUALIFIER void sync_warps_arrive(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
224
+ unsigned int warp_id = thread_rank / 32;
225
+ bool warp_master = (thread_rank % 32 == 0);
226
+ unsigned int warp_bit = 1 << warp_id;
227
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
228
+
229
+ __syncwarp(0xFFFFFFFF);
230
+
231
+ if (warp_master) {
232
+ red_or_release_cta(arrived, warp_bit);
233
+ }
234
+ }
235
+
236
+ // Wait for my warp to be released from the barrier. Warp must have arrived first.
237
+ _CG_STATIC_QUALIFIER void sync_warps_wait(barrier_t *arrived, unsigned int thread_rank) {
238
+ unsigned int warp_id = thread_rank / 32;
239
+ unsigned int warp_bit = 1 << warp_id;
240
+
241
+ barrier_wait(arrived, warp_bit);
242
+ }
243
+
244
+ // Wait for specific warp to arrive at the barrier
245
+ _CG_QUALIFIER void sync_warps_wait_for_specific_warp(barrier_t *arrived, unsigned int wait_warp_id) {
246
+ unsigned int wait_mask = 1 << wait_warp_id;
247
+ while((ld_acquire_cta(arrived) & wait_mask) != wait_mask);
248
+ }
249
+
250
+ // Initialize the bit corresponding to my warp in the barrier
251
+ _CG_QUALIFIER void sync_warps_reset(barrier_t *arrived, unsigned int thread_rank) {
252
+ unsigned int warp_id = thread_rank / 32;
253
+ unsigned int warp_bit = 1 << warp_id;
254
+
255
+ __syncwarp(0xFFFFFFFF);
256
+
257
+ if (thread_rank % 32 == 0) {
258
+ red_and_release_cta(arrived, ~warp_bit);
259
+ }
260
+ // No need to sync after the atomic, there will be a sync of the group that is being partitioned right after this.
261
+ }
262
+
263
+ } // details
264
+
265
+ _CG_END_NAMESPACE
266
+
267
+ #endif // _CG_GRID_H
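To make the bit-mask layout used by these routines concrete, here is a standalone restatement of get_group_mask with a couple of worked values; the helper and the chosen tile sizes are illustrative, not part of the header.

#include <cassert>

// Same formula as details::get_group_mask above, restated for a host-side check.
static unsigned int group_mask(unsigned int thread_rank, unsigned int num_warps) {
    return num_warps == 32 ? ~0u
                           : ((1u << num_warps) - 1u) << (num_warps * (thread_rank / (num_warps * 32)));
}

int main() {
    // A 4-warp (128-thread) tile: the first tile owns bits 0..3, the second bits 4..7.
    assert(group_mask(0, 4)   == 0x0Fu);        // thread ranks 0..127
    assert(group_mask(130, 4) == 0xF0u);        // thread ranks 128..255
    // A full 32-warp group owns every bit of the barrier word.
    assert(group_mask(5, 32)  == 0xFFFFFFFFu);
    return 0;
}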
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h ADDED
@@ -0,0 +1,62 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMCPY_ASYNC
50
+ #define _COOPERATIVE_GROUPS_MEMCPY_ASYNC
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/async.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+ #endif // _COOPERATIVE_GROUPS_MEMCPY_ASYNC
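A minimal usage sketch of the API this wrapper exposes (editorial illustration; the staging size and kernel shape are assumptions): the block cooperatively stages n floats into dynamic shared memory, waits for the copy, then works on the staged data.

#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
namespace cg = cooperative_groups;

__global__ void stage_demo(const float* global_in, float* global_out, size_t n) {
    extern __shared__ float staging[];   // launched with n * sizeof(float) dynamic shared memory
    cg::thread_block block = cg::this_thread_block();

    // Cooperative, potentially asynchronous copy into shared memory.
    cg::memcpy_async(block, staging, global_in, n * sizeof(float));
    cg::wait(block);                     // block until the copies issued by this group complete

    size_t i = block.thread_rank();
    if (i < n) global_out[i] = staging[i] * 2.0f;
}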
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/reduce.h ADDED
@@ -0,0 +1,63 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_REDUCE_H
50
+ #define _COOPERATIVE_GROUPS_REDUCE_H
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/reduce.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+
63
+ #endif //_COOPERATIVE_GROUPS_REDUCE_H
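A minimal usage sketch of the reduce API this wrapper exposes (editorial illustration; tile width and output layout are assumptions): every thread of a 32-wide tile contributes one value and lane 0 writes the tile's sum.

#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
namespace cg = cooperative_groups;

__global__ void tile_sum_demo(const int* in, int* out) {
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Every thread of the tile receives the sum of the tile's 32 inputs.
    int sum = cg::reduce(tile, in[i], cg::plus<int>());
    if (tile.thread_rank() == 0) {
        out[blockIdx.x * tile.meta_group_size() + tile.meta_group_rank()] = sum;
    }
}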
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/scan.h ADDED
@@ -0,0 +1,63 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_SCAN_H
50
+ #define _COOPERATIVE_GROUPS_SCAN_H
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/scan.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+
63
+ #endif //_COOPERATIVE_GROUPS_SCAN_H
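The *_update overloads declared in details/scan.h (and re-exported through this wrapper) combine a scan with an atomic accumulation; as I read the API, each thread receives the old counter value combined with its own exclusive prefix while the counter advances by the group total, which is the usual stream-compaction pattern. A hedged sketch, with the kernel shape and counter handling as assumptions:

#include <cooperative_groups.h>
#include <cooperative_groups/scan.h>
#include <cuda/atomic>
namespace cg = cooperative_groups;

__global__ void compact_demo(const int* in, int* out, int* cursor) {
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);

    // Device-scope atomic view of a plain int counter allocated by the host.
    cuda::atomic_ref<int, cuda::thread_scope_device> write_cursor(*cursor);

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    int keep = (in[i] > 0) ? 1 : 0;
    // Each keeping thread gets a unique output slot; the cursor advances by the tile total.
    int slot = cg::exclusive_scan_update(tile, write_cursor, keep);
    if (keep) out[slot] = in[i];
}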
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h ADDED
@@ -0,0 +1,282 @@
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAVDPAU_H
51
+ #define CUDAVDPAU_H
52
+
53
+ #ifdef CUDA_FORCE_API_VERSION
54
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
55
+ #endif
56
+
57
+ #define cuVDPAUCtxCreate cuVDPAUCtxCreate_v2
58
+
59
+ #ifdef __cplusplus
60
+ extern "C" {
61
+ #endif
62
+
63
+ /**
64
+ * \defgroup CUDA_VDPAU VDPAU Interoperability
65
+ * \ingroup CUDA_DRIVER
66
+ *
67
+ * ___MANBRIEF___ VDPAU interoperability functions of the low-level CUDA driver
68
+ * API (___CURRENT_FILE___) ___ENDMANBRIEF___
69
+ *
70
+ * This section describes the VDPAU interoperability functions of the
71
+ * low-level CUDA driver application programming interface.
72
+ *
73
+ * @{
74
+ */
75
+
76
+ /**
77
+ * \brief Gets the CUDA device associated with a VDPAU device
78
+ *
79
+ * Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
80
+ * applicable.
81
+ *
82
+ * \param pDevice - Device associated with vdpDevice
83
+ * \param vdpDevice - A VdpDevice handle
84
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
85
+ *
86
+ * \return
87
+ * ::CUDA_SUCCESS,
88
+ * ::CUDA_ERROR_DEINITIALIZED,
89
+ * ::CUDA_ERROR_NOT_INITIALIZED,
90
+ * ::CUDA_ERROR_INVALID_CONTEXT,
91
+ * ::CUDA_ERROR_INVALID_VALUE
92
+ * \notefnerr
93
+ *
94
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
95
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
96
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
97
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
98
+ * ::cudaVDPAUGetDevice
99
+ */
100
+ CUresult CUDAAPI cuVDPAUGetDevice(CUdevice *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
101
+
102
+ /**
103
+ * \brief Create a CUDA context for interoperability with VDPAU
104
+ *
105
+ * Creates a new CUDA context, initializes VDPAU interoperability, and
106
+ * associates the CUDA context with the calling thread. It must be called
107
+ * before performing any other VDPAU interoperability operations. It may fail
108
+ * if the needed VDPAU driver facilities are not available. For usage of the
109
+ * \p flags parameter, see ::cuCtxCreate().
110
+ *
111
+ * \param pCtx - Returned CUDA context
112
+ * \param flags - Options for CUDA context creation
113
+ * \param device - Device on which to create the context
114
+ * \param vdpDevice - The VdpDevice to interop with
115
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
116
+ *
117
+ * \return
118
+ * ::CUDA_SUCCESS,
119
+ * ::CUDA_ERROR_DEINITIALIZED,
120
+ * ::CUDA_ERROR_NOT_INITIALIZED,
121
+ * ::CUDA_ERROR_INVALID_CONTEXT,
122
+ * ::CUDA_ERROR_INVALID_VALUE,
123
+ * ::CUDA_ERROR_OUT_OF_MEMORY
124
+ * \notefnerr
125
+ *
126
+ * \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
127
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
128
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
129
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
130
+ * ::cuVDPAUGetDevice
131
+ */
132
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
133
+
134
+ /**
135
+ * \brief Registers a VDPAU VdpVideoSurface object
136
+ *
137
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by
138
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
139
+ * The surface's intended usage is specified using \p flags, as follows:
140
+ *
141
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
142
+ * resource will be used. It is therefore assumed that this resource will be
143
+ * read from and written to by CUDA. This is the default value.
144
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
145
+ * will not write to this resource.
146
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
147
+ * CUDA will not read from this resource and will write over the
148
+ * entire contents of the resource, so none of the data previously
149
+ * stored in the resource will be preserved.
150
+ *
151
+ * The VdpVideoSurface is presented as an array of subresources that may be
152
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
153
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
154
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
155
+ *
156
+ * \htmlonly
157
+ * <table>
158
+ * <tr><th>VdpChromaType </th><th>arrayIndex</th><th>Size </th><th>Format</th><th>Content </th></tr>
159
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_420</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
160
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
161
+ * <tr> <td>2 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Top-field chroma </td></tr>
162
+ * <tr> <td>3 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
163
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_422</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
164
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
165
+ * <tr> <td>2 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Top-field chroma </td></tr>
166
+ * <tr> <td>3 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
167
+ * </table>
168
+ * \endhtmlonly
169
+ *
170
+ * \latexonly
171
+ * \begin{tabular}{|l|l|l|l|l|}
172
+ * \hline
173
+ * VdpChromaType & arrayIndex & Size & Format & Content \\
174
+ * \hline
175
+ * VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\
176
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
177
+ * & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\
178
+ * & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\
179
+ * \hline
180
+ * VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\
181
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
182
+ * & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\
183
+ * & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\
184
+ * \hline
185
+ * \end{tabular}
186
+ * \endlatexonly
187
+ *
188
+ * \param pCudaResource - Pointer to the returned object handle
189
+ * \param vdpSurface - The VdpVideoSurface to be registered
190
+ * \param flags - Map flags
191
+ *
192
+ * \return
193
+ * ::CUDA_SUCCESS,
194
+ * ::CUDA_ERROR_INVALID_HANDLE,
195
+ * ::CUDA_ERROR_ALREADY_MAPPED,
196
+ * ::CUDA_ERROR_INVALID_CONTEXT,
197
+ * \notefnerr
198
+ *
199
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
200
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
201
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
202
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
203
+ * ::cuVDPAUGetDevice,
204
+ * ::cudaGraphicsVDPAURegisterVideoSurface
205
+ */
206
+ CUresult CUDAAPI cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
207
+
208
+ /**
209
+ * \brief Registers a VDPAU VdpOutputSurface object
210
+ *
211
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by
212
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
213
+ * The surface's intended usage is specified using \p flags, as follows:
214
+ *
215
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
216
+ * resource will be used. It is therefore assumed that this resource will be
217
+ * read from and written to by CUDA. This is the default value.
218
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
219
+ * will not write to this resource.
220
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
221
+ * CUDA will not read from this resource and will write over the
222
+ * entire contents of the resource, so none of the data previously
223
+ * stored in the resource will be preserved.
224
+ *
225
+ * The VdpOutputSurface is presented as an array of subresources that may be
226
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
227
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
228
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
229
+ *
230
+ * \htmlonly
231
+ * <table>
232
+ * <tr><th>VdpRGBAFormat </th><th>arrayIndex</th><th>Size </th><th>Format </th><th>Content </th></tr>
233
+ * <tr><td>VDP_RGBA_FORMAT_B8G8R8A8 </td><td>0 </td><td>w x h</td><td>ARGB8 </td><td>Entire surface</td></tr>
234
+ * <tr><td>VDP_RGBA_FORMAT_R10G10B10A2</td><td>0 </td><td>w x h</td><td>A2BGR10</td><td>Entire surface</td></tr>
235
+ * </table>
236
+ * \endhtmlonly
237
+ *
238
+ * \latexonly
239
+ * \begin{tabular}{|l|l|l|l|l|}
240
+ * \hline
241
+ * VdpRGBAFormat & arrayIndex & Size & Format & Content \\
242
+ * \hline
243
+ * VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\
244
+ * VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\
245
+ * \hline
246
+ * \end{tabular}
247
+ * \endlatexonly
248
+ *
249
+ * \param pCudaResource - Pointer to the returned object handle
250
+ * \param vdpSurface - The VdpOutputSurface to be registered
251
+ * \param flags - Map flags
252
+ *
253
+ * \return
254
+ * ::CUDA_SUCCESS,
255
+ * ::CUDA_ERROR_INVALID_HANDLE,
256
+ * ::CUDA_ERROR_ALREADY_MAPPED,
257
+ * ::CUDA_ERROR_INVALID_CONTEXT,
258
+ * \notefnerr
259
+ *
260
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
261
+ * ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource,
262
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
263
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
264
+ * ::cuVDPAUGetDevice,
265
+ * ::cudaGraphicsVDPAURegisterOutputSurface
266
+ */
267
+ CUresult CUDAAPI cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
268
+
269
+ /** @} */ /* END CUDA_VDPAU */
270
+
271
+
272
+ #if defined(__CUDA_API_VERSION_INTERNAL)
273
+ #undef cuVDPAUCtxCreate
274
+
275
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
276
+ #endif /* __CUDA_API_VERSION_INTERNAL */
277
+
278
+ #ifdef __cplusplus
279
+ };
280
+ #endif
281
+
282
+ #endif
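An illustrative call sequence for the interop entry points declared above (editorial sketch; the VDPAU handles are assumed to come from the application's existing VDPAU setup, and error handling is omitted):

#include <vdpau/vdpau.h>
#include <cuda.h>
#include "cudaVDPAU.h"

void register_and_map(VdpDevice vdp_device, VdpGetProcAddress *vdp_get_proc_address,
                      VdpOutputSurface out_surface) {
    CUdevice dev;
    CUcontext ctx;
    CUgraphicsResource res;
    CUarray array;

    cuInit(0);
    cuVDPAUGetDevice(&dev, vdp_device, vdp_get_proc_address);
    cuVDPAUCtxCreate(&ctx, 0, dev, vdp_device, vdp_get_proc_address);

    /* A B8G8R8A8 output surface is exposed as a single subresource (arrayIndex 0, mipLevel 0). */
    cuGraphicsVDPAURegisterOutputSurface(&res, out_surface, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
    cuGraphicsMapResources(1, &res, 0);
    cuGraphicsSubResourceGetMappedArray(&array, res, 0, 0);
    /* ... launch kernels or issue copies against 'array' here ... */
    cuGraphicsUnmapResources(1, &res, 0);
    cuGraphicsUnregisterResource(res);
}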
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h ADDED
@@ -0,0 +1,280 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_H_
51
+ # define _CUDA_AWBARRIER_H_
52
+
53
+ # include "cuda_awbarrier_primitives.h"
54
+
55
+ # if !defined(_CUDA_AWBARRIER_SM_TARGET)
56
+ # error This file requires compute capability 7.0 or greater.
57
+ # endif
58
+
59
+ # if !defined(_CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER)
60
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
61
+ -std=c++11 compiler option.
62
+ # endif
63
+
64
+ _CUDA_AWBARRIER_BEGIN_NAMESPACE
65
+
66
+ class awbarrier {
67
+ public:
68
+ class arrival_token {
69
+ public:
70
+ arrival_token() = default;
71
+ ~arrival_token() = default;
72
+ _CUDA_AWBARRIER_QUALIFIER uint32_t pending_count() const;
73
+ private:
74
+ _CUDA_AWBARRIER_QUALIFIER arrival_token(uint64_t token);
75
+ uint64_t token;
76
+ friend awbarrier;
77
+ };
78
+ awbarrier() = default;
79
+ awbarrier(const awbarrier&) = delete;
80
+ awbarrier& operator=(const awbarrier&) = delete;
81
+ ~awbarrier() = default;
82
+
83
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive();
84
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive_and_drop();
85
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait(arrival_token token, uint32_t hint_cycles);
86
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait_parity(bool phase, uint32_t hint_cycles);
87
+ _CUDA_AWBARRIER_QUALIFIER void wait(arrival_token token);
88
+ _CUDA_AWBARRIER_QUALIFIER void arrive_and_wait();
89
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait(arrival_token token, uint32_t maxSleepNanosec);
90
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait_parity(bool phase, uint32_t maxSleepNanosec);
91
+ _CUDA_AWBARRIER_STATIC_QUALIFIER __host__ constexpr uint32_t max();
92
+
93
+ private:
94
+ uint64_t barrier;
95
+ friend _CUDA_AWBARRIER_QUALIFIER void init(awbarrier* barrier, uint32_t expected_count);
96
+ friend _CUDA_AWBARRIER_QUALIFIER void inval(awbarrier* barrier);
97
+ friend class pipeline;
98
+ };
99
+
100
+ _CUDA_AWBARRIER_QUALIFIER
101
+ uint32_t awbarrier::arrival_token::pending_count() const
102
+ {
103
+ const uint32_t pending_count = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(this->token);
104
+ #if (__CUDA_ARCH__ >= 900)
105
+ return pending_count;
106
+ #else
107
+ return (pending_count >> 15);
108
+ #endif
109
+ }
110
+
111
+ _CUDA_AWBARRIER_QUALIFIER
112
+ awbarrier::arrival_token::arrival_token(uint64_t token)
113
+ : token(token)
114
+ {
115
+ }
116
+
117
+ _CUDA_AWBARRIER_QUALIFIER
118
+ void init(awbarrier* barrier, uint32_t expected_count)
119
+ {
120
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
121
+ _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count <= _CUDA_AWBARRIER_MAX_COUNT);
122
+
123
+ #if (__CUDA_ARCH__ >= 900)
124
+ const uint32_t init_count = expected_count;
125
+ #else
126
+ const uint32_t init_count = (expected_count << 15) + expected_count;
127
+ #endif
128
+
129
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(&barrier->barrier, init_count);
130
+ }
131
+
132
+ _CUDA_AWBARRIER_QUALIFIER
133
+ void inval(awbarrier* barrier)
134
+ {
135
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
136
+
137
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(&barrier->barrier);
138
+ }
139
+
140
+ _CUDA_AWBARRIER_QUALIFIER
141
+ awbarrier::arrival_token awbarrier::arrive()
142
+ {
143
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
144
+
145
+ #if (__CUDA_ARCH__ < 900)
146
+ const uint32_t arrive_count = 1 << 15;
147
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<false>(&this->barrier, arrive_count);
148
+ (void)
149
+ #else
150
+ const uint64_t token =
151
+ #endif
152
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(&this->barrier);
153
+
154
+ return arrival_token(token);
155
+ }
156
+
157
+ _CUDA_AWBARRIER_QUALIFIER
158
+ awbarrier::arrival_token awbarrier::arrive_and_drop()
159
+ {
160
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
161
+
162
+ #if (__CUDA_ARCH__ < 900)
163
+ const uint32_t arrive_count = 1 << 15;
164
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<true>(&this->barrier, arrive_count);
165
+ (void)
166
+ #else
167
+ const uint64_t token =
168
+ #endif
169
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(&this->barrier);
170
+
171
+ return arrival_token(token);
172
+ }
173
+
174
+ _CUDA_AWBARRIER_QUALIFIER
175
+ bool awbarrier::timed_wait(arrival_token token, uint32_t hint_cycles)
176
+ {
177
+ constexpr uint64_t max_busy_wait_cycles = 1024;
178
+ constexpr uint32_t max_sleep_ns = 1 << 20;
179
+
180
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
181
+
182
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
183
+ return true;
184
+ }
185
+
186
+ uint64_t start_cycles = clock64();
187
+ uint64_t elapsed_cycles = 0;
188
+ uint32_t sleep_ns = 32;
189
+ while (elapsed_cycles < hint_cycles) {
190
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
191
+ return true;
192
+ }
193
+
194
+ if (elapsed_cycles > max_busy_wait_cycles) {
195
+ __nanosleep(sleep_ns);
196
+ if (sleep_ns < max_sleep_ns) {
197
+ sleep_ns *= 2;
198
+ }
199
+ }
200
+
201
+ elapsed_cycles = clock64() - start_cycles;
202
+ }
203
+
204
+ return false;
205
+ }
206
+
207
+ _CUDA_AWBARRIER_QUALIFIER
208
+ bool awbarrier::timed_wait_parity(bool phase, uint32_t hint_cycles)
209
+ {
210
+ constexpr uint64_t max_busy_wait_cycles = 1024;
211
+ constexpr uint32_t max_sleep_ns = 1 << 20;
212
+
213
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
214
+
215
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
216
+ return true;
217
+ }
218
+
219
+ uint64_t start_cycles = clock64();
220
+ uint64_t elapsed_cycles = 0;
221
+ uint32_t sleep_ns = 32;
222
+ while (elapsed_cycles < hint_cycles) {
223
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
224
+ return true;
225
+ }
226
+
227
+ if (elapsed_cycles > max_busy_wait_cycles) {
228
+ __nanosleep(sleep_ns);
229
+ if (sleep_ns < max_sleep_ns) {
230
+ sleep_ns *= 2;
231
+ }
232
+ }
233
+
234
+ elapsed_cycles = clock64() - start_cycles;
235
+ }
236
+
237
+ return false;
238
+ }
239
+
240
+ _CUDA_AWBARRIER_QUALIFIER
241
+ bool awbarrier::try_wait(arrival_token token, uint32_t maxSleepNanosec)
242
+ {
243
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
244
+
245
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(&this->barrier, token.token, maxSleepNanosec);
246
+ }
247
+
248
+ _CUDA_AWBARRIER_QUALIFIER
249
+ bool awbarrier::try_wait_parity(bool phase, uint32_t maxSleepNanosec)
250
+ {
251
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
252
+
253
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(&this->barrier, phase, maxSleepNanosec);
254
+ }
255
+
256
+ _CUDA_AWBARRIER_QUALIFIER
257
+ void awbarrier::wait(arrival_token token)
258
+ {
259
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
260
+
261
+ while (!timed_wait(token, ~0u));
262
+ }
263
+
264
+ _CUDA_AWBARRIER_QUALIFIER
265
+ void awbarrier::arrive_and_wait()
266
+ {
267
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
268
+
269
+ this->wait(this->arrive());
270
+ }
271
+
272
+ _CUDA_AWBARRIER_QUALIFIER __host__
273
+ constexpr uint32_t awbarrier::max()
274
+ {
275
+ return _CUDA_AWBARRIER_MAX_COUNT;
276
+ }
277
+
278
+ _CUDA_AWBARRIER_END_NAMESPACE
279
+
280
+ #endif /* !_CUDA_AWBARRIER_H_ */
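A minimal usage sketch of the awbarrier class above (editorial illustration): one thread initializes the barrier for the whole block, every thread arrives, and the returned token is used to wait. The enclosing namespace (nvcuda::experimental in the toolkits I have seen) and the two-phase kernel shape are assumptions.

#include <cuda_awbarrier.h>

__global__ void barrier_demo(float* data) {
    __shared__ nvcuda::experimental::awbarrier bar;
    if (threadIdx.x == 0) {
        nvcuda::experimental::init(&bar, blockDim.x);   // every thread of the block participates
    }
    __syncthreads();

    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    data[i] += 1.0f;                 // phase-1 work
    auto token = bar.arrive();       // signal arrival; independent work could go here
    bar.wait(token);                 // block until the whole block has arrived

    data[i] *= 2.0f;                 // phase-2 work, ordered after phase 1
}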
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp ADDED
@@ -0,0 +1,1546 @@
1
+ /*
2
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_FP8_HPP__)
51
+ #define __CUDA_FP8_HPP__
52
+
53
+ #if !defined(__CUDA_FP8_H__)
54
+ #error "Do not include this file directly. Instead, include cuda_fp8.h."
55
+ #endif
56
+
57
+ /* C++ header for std::memcpy (used for type punning in host-side
58
+ * implementations). When compiling as a CUDA source file memcpy is provided
59
+ * implicitly. !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
60
+ */
61
+ #if defined(__cplusplus) && !defined(__CUDACC__)
62
+ #include <cstring>
63
+ #elif !defined(__cplusplus) && !defined(__CUDACC__)
64
+ #include <string.h>
65
+ #endif /* defined(__cplusplus) && !defined(__CUDACC__) */
66
+
67
+ /* Set up structure-alignment attribute */
68
+ #if !(defined __CUDA_ALIGN__)
69
+ #if defined(__CUDACC__)
70
+ #define __CUDA_ALIGN__(align) __align__(align)
71
+ #else
72
+ /* Define alignment macro based on compiler type (cannot assume C11 "_Alignas"
73
+ * is available) */
74
+ #if __cplusplus >= 201103L
75
+ #define __CUDA_ALIGN__(n) \
76
+ alignas(n) /* C++11 kindly gives us a keyword for this */
77
+ #else /* !defined(__CPP_VERSION_AT_LEAST_11_FP8)*/
78
+ #if defined(__GNUC__)
79
+ #define __CUDA_ALIGN__(n) __attribute__((aligned(n)))
80
+ #elif defined(_MSC_VER)
81
+ #define __CUDA_ALIGN__(n) __declspec(align(n))
82
+ #else
83
+ #define __CUDA_ALIGN__(n)
84
+ #endif /* defined(__GNUC__) */
85
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
86
+ #endif /* defined(__CUDACC__) */
87
+ #endif /* !(defined __CUDA_ALIGN__) */
88
+
89
+ #if !(defined __CPP_VERSION_AT_LEAST_11_FP8)
90
+ /* need c++11 for explicit operators */
91
+ #define __CUDA_NO_FP8_CONVERSION_OPERATORS__
92
+ #endif
93
+
94
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
95
+ __nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
96
+ const __nv_fp8_interpretation_t fp8_interpretation) {
97
+ unsigned char res;
98
+ unsigned long long int xbits;
99
+
100
+ #if defined(__CUDACC__) || (!defined __cplusplus)
101
+ (void)memcpy(&xbits, &x, sizeof(x));
102
+ #else
103
+ (void)std::memcpy(&xbits, &x, sizeof(x));
104
+ #endif
105
+ unsigned char FP8_MAXNORM;
106
+ unsigned char FP8_MANTISSA_MASK;
107
+ unsigned short int FP8_EXP_BIAS;
108
+ unsigned long long int FP8_SIGNIFICAND_BITS;
109
+ const unsigned long long int DP_INF_BITS = 0x7FF0000000000000ULL;
110
+ unsigned long long int FP8_MINDENORM_O2;
111
+ unsigned long long int FP8_OVERFLOW_THRESHOLD;
112
+ unsigned long long int FP8_MINNORM;
113
+
114
+ if (fp8_interpretation == __NV_E4M3) {
115
+ FP8_EXP_BIAS = 7U;
116
+ FP8_SIGNIFICAND_BITS = 4ULL;
117
+ FP8_MANTISSA_MASK = 0x7U;
118
+ FP8_MINDENORM_O2 = 0x3F50000000000000ULL; // mindenorm/2 = 2^-10
119
+ FP8_OVERFLOW_THRESHOLD =
120
+ 0x407D000000000000ULL; // maxnorm + 1/2ulp = 0x1.Cp+8 + 0x1p+4
121
+ FP8_MAXNORM = 0x7EU;
122
+ FP8_MINNORM = 0x3F90000000000000ULL; // minnorm = 2^-6
123
+ } else { //__NV_E5M2
124
+ FP8_EXP_BIAS = 15U;
125
+ FP8_SIGNIFICAND_BITS = 3ULL;
126
+ FP8_MANTISSA_MASK = 0x3U;
127
+ FP8_MINDENORM_O2 = 0x3EE0000000000000ULL; // mindenorm/2 = 2^-17
128
+ FP8_OVERFLOW_THRESHOLD =
129
+ 0x40EE000000000000ULL -
130
+ 1ULL; // maxnorm + 1/2ulp = 0x1.Ep+15, and -1 to have common code
131
+ FP8_MAXNORM = 0x7BU;
132
+ FP8_MINNORM = 0x3F10000000000000ULL; // minnorm = 2^-14
133
+ }
134
+
135
+ // 1/2 LSB of the target format, positioned in double precision mantissa
136
+ // helpful in midpoints detection during round-to-nearest-even step
137
+ const unsigned long long int FP8_DP_HALF_ULP =
138
+ (unsigned long long int)1ULL << (53ULL - FP8_SIGNIFICAND_BITS - 1ULL);
139
+ // prepare sign bit in target format
140
+ unsigned char sign = (unsigned char)((xbits >> 63ULL) << 7U);
141
+ // prepare exponent field in target format
142
+ unsigned char exp =
143
+ (unsigned char)((((unsigned short int)(xbits >> 52ULL)) & 0x7FFU) -
144
+ 1023U + FP8_EXP_BIAS);
145
+ // round mantissa to target format width, rounding towards zero
146
+ unsigned char mantissa =
147
+ (unsigned char)(xbits >> (53ULL - FP8_SIGNIFICAND_BITS)) &
148
+ FP8_MANTISSA_MASK;
149
+ unsigned long long int absx = xbits & 0x7FFFFFFFFFFFFFFFULL;
150
+
151
+ if (absx <= FP8_MINDENORM_O2) {
152
+ // zero or underflow
153
+ res = 0U;
154
+ } else if (absx > DP_INF_BITS) {
155
+ // NaN
156
+ if (fp8_interpretation == __NV_E4M3) {
157
+ res = 0x7FU;
158
+ } else {
159
+ // NaN --> QNaN
160
+ res = 0x7EU | mantissa;
161
+ }
162
+ } else if (absx > FP8_OVERFLOW_THRESHOLD) {
163
+ if (saturate == __NV_SATFINITE) {
164
+ res = FP8_MAXNORM;
165
+ } else {
166
+ // __NV_NOSAT
167
+ if (fp8_interpretation == __NV_E4M3) {
168
+ // no Inf in E4M3
169
+ res = 0x7FU; // NaN
170
+ } else {
171
+ res = 0x7CU; // Inf in E5M2
172
+ }
173
+ }
174
+ } else if (absx >= FP8_MINNORM) {
175
+ res = (unsigned char)((exp << (FP8_SIGNIFICAND_BITS - 1U)) | mantissa);
176
+ // rounded-off bits
177
+ unsigned long long int round =
178
+ xbits & ((FP8_DP_HALF_ULP << 1ULL) - 1ULL);
179
+ // round-to-nearest-even adjustment
180
+ if ((round > FP8_DP_HALF_ULP) ||
181
+ ((round == FP8_DP_HALF_ULP) && (mantissa & 1U))) {
182
+ res = (unsigned char)(res + 1U);
183
+ }
184
+ } else // Denormal range
185
+ {
186
+ unsigned char shift = (unsigned char)(1U - exp);
187
+ // add implicit leading bit
188
+ mantissa |= (unsigned char)(1U << (FP8_SIGNIFICAND_BITS - 1U));
189
+ // additional round-off due to denormalization
190
+ res = (unsigned char)(mantissa >> shift);
191
+
192
+ // rounded-off bits, including implicit leading bit
193
+ unsigned long long int round =
194
+ (xbits | ((unsigned long long int)1ULL << (53ULL - 1ULL))) &
195
+ ((FP8_DP_HALF_ULP << (shift + 1ULL)) - 1ULL);
196
+ // round-to-nearest-even adjustment
197
+ if ((round > (FP8_DP_HALF_ULP << shift)) ||
198
+ ((round == (FP8_DP_HALF_ULP << shift)) && (res & 1U))) {
199
+ res = (unsigned char)(res + 1U);
200
+ }
201
+ }
202
+
203
+ res |= sign;
204
+
205
+ return (__nv_fp8_storage_t)res;
206
+ }
207
+
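For illustration, a minimal host-side sketch of the converter above (an editor-added example, not part of the committed header; it assumes the CUDA Toolkit include path so that cuda_fp8.h resolves, and plain C++ host compilation):

    #include <cuda_fp8.h>
    #include <cstdio>

    int main() {
        // 1.0 encodes as 0x38 in E4M3 (exponent bias 7, zero mantissa).
        __nv_fp8_storage_t a = __nv_cvt_double_to_fp8(1.0, __NV_SATFINITE, __NV_E4M3);
        // Out of range: __NV_SATFINITE clamps to the largest finite encoding (0x7E),
        // while __NV_NOSAT yields NaN (0x7F) because E4M3 has no Inf encoding.
        __nv_fp8_storage_t b = __nv_cvt_double_to_fp8(1.0e6, __NV_SATFINITE, __NV_E4M3);
        __nv_fp8_storage_t c = __nv_cvt_double_to_fp8(1.0e6, __NV_NOSAT, __NV_E4M3);
        std::printf("0x%02X 0x%02X 0x%02X\n", (unsigned)a, (unsigned)b, (unsigned)c);
        return 0;
    }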
208
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
209
+ __nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate,
210
+ const __nv_fp8_interpretation_t fp8_interpretation) {
211
+ __nv_fp8x2_storage_t storage = (__nv_fp8x2_storage_t)__nv_cvt_double_to_fp8(
212
+ x.y, saturate, fp8_interpretation);
213
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
214
+ storage = (__nv_fp8x2_storage_t)(storage |
215
+ __nv_cvt_double_to_fp8(
216
+ x.x, saturate, fp8_interpretation));
217
+ return storage;
218
+ }
219
+
220
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
221
+ __nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate,
222
+ const __nv_fp8_interpretation_t fp8_interpretation) {
223
+ __nv_fp8_storage_t res = 0U;
224
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
225
+ if (saturate == __NV_SATFINITE) {
226
+ __nv_fp8x2_storage_t storage;
227
+ if (fp8_interpretation == __NV_E5M2) {
228
+ asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
229
+ : "=h"(storage)
230
+ : "f"(x), "f"(0.0f));
231
+ } else {
232
+ asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
233
+ : "=h"(storage)
234
+ : "f"(x), "f"(0.0f));
235
+ }
236
+ res = (__nv_fp8_storage_t)storage;
237
+ } else
238
+ #endif
239
+ {
240
+ unsigned int xbits;
241
+ #if defined(__CUDACC__) || (!defined __cplusplus)
242
+ (void)memcpy(&xbits, &x, sizeof(x));
243
+ #else
244
+ (void)std::memcpy(&xbits, &x, sizeof(x));
245
+ #endif
246
+
247
+ // isnan
248
+ if ((xbits & 0x7FFFFFFFU) > 0x7F800000U) {
249
+ // Canonical NaN
250
+ xbits = 0x7FFFFFFFU;
251
+ }
252
+
253
+ float fx;
254
+ #if defined(__CUDACC__) || (!defined __cplusplus)
255
+ (void)memcpy(&fx, &xbits, sizeof(xbits));
256
+ #else
257
+ (void)std::memcpy(&fx, &xbits, sizeof(xbits));
258
+ #endif
259
+
260
+ const double dx = (double)fx;
261
+ res = __nv_cvt_double_to_fp8(dx, saturate, fp8_interpretation);
262
+ }
263
+ return res;
264
+ }
265
+
266
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
267
+ __nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate,
268
+ const __nv_fp8_interpretation_t fp8_interpretation) {
269
+ __nv_fp8x2_storage_t storage;
270
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
271
+ if (saturate == __NV_SATFINITE) {
272
+ if (fp8_interpretation == __NV_E5M2) {
273
+ asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
274
+ : "=h"(storage)
275
+ : "f"(x.x), "f"(x.y));
276
+ } else {
277
+ asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
278
+ : "=h"(storage)
279
+ : "f"(x.x), "f"(x.y));
280
+ }
281
+ } else
282
+ #endif
283
+ {
284
+ storage = (__nv_fp8x2_storage_t)__nv_cvt_float_to_fp8(
285
+ x.y, saturate, fp8_interpretation);
286
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
287
+ storage = (__nv_fp8x2_storage_t)(storage | __nv_cvt_float_to_fp8(
288
+ x.x, saturate,
289
+ fp8_interpretation));
290
+ }
291
+ return storage;
292
+ }
293
+
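A short sketch of the packed variant (editor-added, not from the header): the low byte of the 16-bit result holds the conversion of .x and the high byte holds .y, matching the shift-and-or fallback above. vector_types.h is included explicitly here as an assumption for host-only builds; nvcc provides float2 implicitly.

    #include <cuda_fp8.h>
    #include <vector_types.h>
    #include <cstdio>

    int main() {
        float2 v; v.x = 1.0f; v.y = -2.0f;
        __nv_fp8x2_storage_t p = __nv_cvt_float2_to_fp8x2(v, __NV_SATFINITE, __NV_E5M2);
        // E5M2: 1.0f -> 0x3C (low byte), -2.0f -> 0xC0 (high byte)
        std::printf("lo=0x%02X hi=0x%02X\n", (unsigned)(p & 0xFFU), (unsigned)(p >> 8U));
        return 0;
    }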
294
+ __CUDA_HOSTDEVICE_FP8_DECL__ float
295
+ __internal_halfraw_to_float(const __half_raw x) {
296
+ float f;
297
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
298
+ asm("{cvt.f32.f16 %0, %1;}\n" : "=f"(f) : "h"(x.x));
299
+ #else
300
+ const unsigned int ux = (unsigned int)x.x;
301
+ unsigned int sign = (ux >> 15U) & 1U;
302
+ unsigned int exponent = (ux >> 10U) & 0x1fU;
303
+ unsigned int mantissa = (ux & 0x3ffU) << 13U;
304
+ if (exponent == 0x1fU) { /* NaN or Inf */
305
+ /* discard sign of a NaN */
306
+ sign = ((mantissa != 0U) ? (sign >> 1U) : sign);
307
+ mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U);
308
+ exponent = 0xffU;
309
+ } else if (exponent == 0U) { /* Denorm or Zero */
310
+ if (mantissa != 0U) {
311
+ unsigned int msb;
312
+ exponent = 0x71U;
313
+ do {
314
+ msb = (mantissa & 0x400000U);
315
+ mantissa <<= 1U; /* normalize */
316
+ --exponent;
317
+ } while (msb == 0U);
318
+ mantissa &= 0x7fffffU; /* 1.mantissa is implicit */
319
+ }
320
+ } else {
321
+ exponent += 0x70U;
322
+ }
323
+ const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa);
324
+ #if defined(__CUDACC__) || (!defined __cplusplus)
325
+ (void)memcpy(&f, &u, sizeof(u));
326
+ #else
327
+ (void)std::memcpy(&f, &u, sizeof(u));
328
+ #endif
329
+ #endif /* (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) */
330
+ return f;
331
+ }
332
+
333
+ __CUDA_HOSTDEVICE_FP8_DECL__ float2
334
+ __internal_halfraw2_to_float2(const __half2_raw x) {
335
+ __half_raw raw;
336
+ float2 res;
337
+ raw.x = x.x;
338
+ res.x = __internal_halfraw_to_float(raw);
339
+ raw.x = x.y;
340
+ res.y = __internal_halfraw_to_float(raw);
341
+ return res;
342
+ }
343
+
344
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
345
+ __nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate,
346
+ const __nv_fp8_interpretation_t fp8_interpretation) {
347
+ __nv_fp8_storage_t res = 0U;
348
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
349
+ if (saturate == __NV_SATFINITE) {
350
+ unsigned int half2_storage = (unsigned int)(x.x);
351
+ __nv_fp8x2_storage_t tmp;
352
+ if (fp8_interpretation == __NV_E5M2) {
353
+ asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
354
+ : "=h"(tmp)
355
+ : "r"(half2_storage));
356
+ } else {
357
+ asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
358
+ : "=h"(tmp)
359
+ : "r"(half2_storage));
360
+ }
361
+ res = (__nv_fp8_storage_t)tmp;
362
+ } else
363
+ #endif
364
+ {
365
+ float fx = __internal_halfraw_to_float(x);
366
+ res = __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
367
+ }
368
+ return res;
369
+ }
370
+
371
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2(
372
+ const __half2_raw x, const __nv_saturation_t saturate,
373
+ const __nv_fp8_interpretation_t fp8_interpretation) {
374
+ __nv_fp8x2_storage_t tmp;
375
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
376
+ if (saturate == __NV_SATFINITE) {
377
+ unsigned int half2_storage;
378
+ (void)memcpy(&half2_storage, &x, sizeof(x));
379
+
380
+ if (fp8_interpretation == __NV_E5M2) {
381
+ asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
382
+ : "=h"(tmp)
383
+ : "r"(half2_storage));
384
+ } else {
385
+ asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
386
+ : "=h"(tmp)
387
+ : "r"(half2_storage));
388
+ }
389
+ } else
390
+ #endif
391
+ {
392
+ __half_raw raw;
393
+ raw.x = x.x;
394
+ __nv_fp8_storage_t lo =
395
+ __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation);
396
+ raw.x = x.y;
397
+ __nv_fp8_storage_t hi =
398
+ __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation);
399
+ tmp = hi;
400
+ tmp = (__nv_fp8x2_storage_t)(tmp << 8U);
401
+ tmp = (__nv_fp8x2_storage_t)(tmp | lo);
402
+ }
403
+ return tmp;
404
+ }
405
+
406
+ __CUDA_HOSTDEVICE_FP8_DECL__ float
407
+ __internal_bf16raw_to_float(const __nv_bfloat16_raw x) {
408
+ const unsigned int ux = ((unsigned int)x.x) << 16U;
409
+ float fx;
410
+ #if defined(__CUDACC__) || (!defined __cplusplus)
411
+ (void)memcpy(&fx, &ux, sizeof(ux));
412
+ #else
413
+ (void)std::memcpy(&fx, &ux, sizeof(ux));
414
+ #endif
415
+ return fx;
416
+ }
417
+
418
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_bfloat16_raw
419
+ __internal_float_to_bf16raw_rz(const float x) {
420
+ unsigned int ux;
421
+ __nv_bfloat16_raw r;
422
+ #if defined(__CUDACC__) || (!defined __cplusplus)
423
+ (void)memcpy(&ux, &x, sizeof(x));
424
+ #else
425
+ (void)std::memcpy(&ux, &x, sizeof(x));
426
+ #endif
427
+ r.x = (unsigned short int)(ux >> 16U);
428
+ return r;
429
+ }
430
+
431
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8(
432
+ const __nv_bfloat16_raw x, const __nv_saturation_t saturate,
433
+ const __nv_fp8_interpretation_t fp8_interpretation) {
434
+ const float fx = __internal_bf16raw_to_float(x);
435
+ const __nv_fp8_storage_t res =
436
+ __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
437
+ return res;
438
+ }
439
+
440
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
441
+ __nv_cvt_bfloat16raw2_to_fp8x2(
442
+ const __nv_bfloat162_raw x, const __nv_saturation_t saturate,
443
+ const __nv_fp8_interpretation_t fp8_interpretation) {
444
+ __nv_bfloat16_raw raw;
445
+ raw.x = x.y;
446
+ __nv_fp8x2_storage_t storage =
447
+ (__nv_fp8x2_storage_t)__nv_cvt_bfloat16raw_to_fp8(raw, saturate,
448
+ fp8_interpretation);
449
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
450
+ raw.x = x.x;
451
+ storage = (__nv_fp8x2_storage_t)(storage |
452
+ __nv_cvt_bfloat16raw_to_fp8(
453
+ raw, saturate, fp8_interpretation));
454
+ return storage;
455
+ }
456
+
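As a usage sketch of the bfloat16 path (again an illustrative example, not part of the commit): an __nv_bfloat16_raw carries the top 16 bits of the equivalent float pattern, so 1.0 is 0x3F80 and converts to the E4M3 encoding 0x38.

    #include <cuda_fp8.h>
    #include <cstdio>

    int main() {
        __nv_bfloat16_raw b;
        b.x = 0x3F80U;  // 1.0 in bfloat16
        __nv_fp8_storage_t r = __nv_cvt_bfloat16raw_to_fp8(b, __NV_SATFINITE, __NV_E4M3);
        std::printf("0x%02X\n", (unsigned)r);  // expected 0x38
        return 0;
    }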
457
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
458
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
459
+ const __nv_fp8_interpretation_t fp8_interpretation);
460
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half_raw
461
+ __nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x,
462
+ const __nv_fp8_interpretation_t fp8_interpretation) {
463
+ __half_raw res;
464
+ res.x = 0U;
465
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
466
+ res.x =
467
+ __nv_cvt_fp8x2_to_halfraw2((__nv_fp8x2_storage_t)x, fp8_interpretation)
468
+ .x;
469
+ #else
470
+ unsigned short int ur = (unsigned short int)x;
471
+ ur = (unsigned short int)(ur << 8U);
472
+
473
+ if (fp8_interpretation == __NV_E5M2) {
474
+ if ((ur & 0x7FFFU) > 0x7C00U) {
475
+ /* If NaN, return canonical NaN */
476
+ ur = 0x7FFFU;
477
+ }
478
+ } else { // __NV_E4M3
479
+ unsigned short int sign = ur & 0x8000U;
480
+ unsigned short int exponent =
481
+ (unsigned short int)(((ur & 0x7800U) >> 1U) + 0x2000U);
482
+ unsigned short int mantissa = (ur & 0x0700U) >> 1U;
483
+ unsigned char absx = 0x7FU & (unsigned char)x;
484
+
485
+ if (absx == 0x7FU) // NaN
486
+ {
487
+ ur = 0x7FFFU; // fp16 canonical NaN, discard sign
488
+ } else if (exponent == 0x2000U) {
489
+ // zero or denormal
490
+ if (mantissa != 0U) {
491
+ // normalize
492
+ mantissa = (unsigned short int)(mantissa << 1U);
493
+ while ((mantissa & 0x0400U) == 0U) {
494
+ mantissa = (unsigned short int)(mantissa << 1U);
495
+ exponent = (unsigned short int)(exponent - 0x0400U);
496
+ }
497
+ // discard implicit leading bit
498
+ mantissa &= 0x03FFU;
499
+ } else { // Zero
500
+ exponent = 0U;
501
+ }
502
+
503
+ ur = (sign | exponent) | mantissa;
504
+ } else {
505
+ ur = (sign | exponent) | mantissa;
506
+ }
507
+ }
508
+ res.x = ur;
509
+ #endif
510
+ return res;
511
+ }
512
+
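A decoding sketch for the scalar path above (editor-added): the E4M3 byte 0x38 (1.0) widens to the binary16 bit pattern 0x3C00. It assumes host compilation against the toolkit headers.

    #include <cuda_fp8.h>
    #include <cstdio>

    int main() {
        const __nv_fp8_storage_t one_e4m3 = 0x38U;
        const __half_raw h = __nv_cvt_fp8_to_halfraw(one_e4m3, __NV_E4M3);
        std::printf("half bits = 0x%04X\n", (unsigned)h.x);  // expected 0x3C00
        return 0;
    }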
513
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
514
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
515
+ const __nv_fp8_interpretation_t fp8_interpretation) {
516
+ __half2_raw res;
517
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
518
+ unsigned int half2_storage;
519
+ if (fp8_interpretation == __NV_E5M2) {
520
+ asm("{cvt.rn.f16x2.e5m2x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x));
521
+ } else {
522
+ asm("{cvt.rn.f16x2.e4m3x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x));
523
+ }
524
+ (void)memcpy(&res, &half2_storage, sizeof(half2_storage));
525
+ #else
526
+ res.x =
527
+ __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)x, fp8_interpretation).x;
528
+ res.y = __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)(x >> 8U),
529
+ fp8_interpretation)
530
+ .x;
531
+ #endif
532
+ return res;
533
+ }
534
+
535
+ /* All other definitions in this file are only visible to C++ compilers */
536
+ #if defined(__cplusplus)
537
+
538
+ /**
539
+ * \defgroup CUDA_MATH_FP8_E5M2_STRUCT C++ struct for handling fp8 data type of e5m2 kind.
540
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
541
+ */
542
+
543
+ /**
544
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
545
+ * \brief __nv_fp8_e5m2 datatype
546
+ *
547
+ * \details This structure implements the datatype for handling
548
+ * \p fp8 floating-point numbers of \p e5m2 kind:
549
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
550
+ *
551
+ * The structure implements converting constructors and operators.
552
+ */
553
+ struct __CUDA_ALIGN__(1) __nv_fp8_e5m2 {
554
+ public:
555
+ /**
556
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
557
+ * Storage variable contains the \p fp8 floating-point data.
558
+ */
559
+ __nv_fp8_storage_t __x;
560
+
561
+ /**
562
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
563
+ * Constructor by default.
564
+ */
565
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
566
+ __nv_fp8_e5m2() = default;
567
+ #else
568
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2() {}
569
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
570
+
571
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
572
+
573
+ /* Construct from wider FP types */
574
+ /* Note we do avoid constructor init-list because of special host/device
575
+ * compilation rules */
576
+
577
+ /**
578
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
579
+ * Constructor from \p __half data type, relies on \p __NV_SATFINITE
580
+ * behavior for out-of-range values.
581
+ */
582
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __half f) {
583
+ __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
584
+ __NV_SATFINITE, __NV_E5M2);
585
+ }
586
+ /**
587
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
588
+ * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
589
+ * behavior for out-of-range values.
590
+ */
591
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __nv_bfloat16 f) {
592
+ __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
593
+ __NV_SATFINITE, __NV_E5M2);
594
+ }
595
+ /**
596
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
597
+ * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
598
+ * for out-of-range values.
599
+ */
600
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const float f) {
601
+ __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
602
+ }
603
+ /**
604
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
605
+ * Constructor from \p double data type, relies on \p __NV_SATFINITE
606
+ * behavior for out-of-range values.
607
+ */
608
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const double f) {
609
+ __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
610
+ }
611
+
612
+ /* Converts from integral */
613
+
614
+ /**
615
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
616
+ * Constructor from \p unsigned \p short \p int data type, relies on \p
617
+ * __NV_SATFINITE behavior for out-of-range values.
618
+ */
619
+ explicit __CUDA_HOSTDEVICE_FP8__
620
+ __nv_fp8_e5m2(const unsigned short int val) {
621
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
622
+ }
623
+ /**
624
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
625
+ * Constructor from \p unsigned \p int data type, relies on \p
626
+ * __NV_SATFINITE behavior for out-of-range values.
627
+ */
628
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const unsigned int val) {
629
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
630
+ }
631
+ /**
632
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
633
+ * Constructor from \p unsigned \p long \p long \p int data type, relies on
634
+ * \p __NV_SATFINITE behavior for out-of-range values.
635
+ */
636
+ explicit __CUDA_HOSTDEVICE_FP8__
637
+ __nv_fp8_e5m2(const unsigned long long int val) {
638
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
639
+ }
640
+
641
+ /**
642
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
643
+ * Constructor from \p short \p int data type.
644
+ */
645
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const short int val) {
646
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
647
+ }
648
+ /**
649
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
650
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
651
+ * for out-of-range values.
652
+ */
653
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const int val) {
654
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
655
+ }
656
+ /**
657
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
658
+ * Constructor from \p long \p long \p int data type, relies on \p
659
+ * __NV_SATFINITE behavior for out-of-range values.
660
+ */
661
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const long long int val) {
662
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
663
+ }
664
+
665
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
666
+ /* Widening FP converts */
667
+ /**
668
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
669
+ * Conversion operator to \p __half data type.
670
+ */
671
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
672
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
673
+ }
674
+ /**
675
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
676
+ * Conversion operator to \p float data type.
677
+ */
678
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
679
+ return __internal_halfraw_to_float(
680
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
681
+ }
682
+ /**
683
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
684
+ * Conversion operator to \p __nv_bfloat16 data type.
685
+ */
686
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
687
+ return static_cast<__nv_bfloat16>(
688
+ __internal_float_to_bf16raw_rz(float(*this)));
689
+ }
690
+ /**
691
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
692
+ * Conversion operator to \p double data type.
693
+ */
694
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
695
+ return static_cast<double>(float(*this));
696
+ }
697
+
698
+ /* Convert to integral */
699
+
700
+ /**
701
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
702
+ * Conversion operator to \p unsigned \p char data type.
703
+ * Clamps negative and too large inputs to the output range.
704
+ * \p NaN inputs convert to \p zero.
705
+ */
706
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
707
+ unsigned char i;
708
+ const float f = float(*this);
709
+ const unsigned char max_val = 0xFFU;
710
+ const unsigned char min_val = 0U;
711
+ const unsigned char bits = (*this).__x;
712
+ // saturation fixup
713
+ if ((bits & 0x7FU) > 0x7CU) {
714
+ // NaN
715
+ i = 0;
716
+ } else if (f > static_cast<float>(max_val)) {
717
+ // saturate maximum
718
+ i = max_val;
719
+ } else if (f < static_cast<float>(min_val)) {
720
+ // saturate minimum
721
+ i = min_val;
722
+ } else {
723
+ // normal value
724
+ i = static_cast<unsigned char>(f);
725
+ }
726
+ return i;
727
+ }
728
+ /**
729
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
730
+ * Conversion operator to \p unsigned \p short \p int data type.
731
+ * Clamps negative and too large inputs to the output range.
732
+ * \p NaN inputs convert to \p zero.
733
+ */
734
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
735
+ return __half2ushort_rz(__half(*this));
736
+ }
737
+ /**
738
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
739
+ * Conversion operator to \p unsigned \p int data type.
740
+ * Clamps negative and too large inputs to the output range.
741
+ * \p NaN inputs convert to \p zero.
742
+ */
743
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
744
+ return __half2uint_rz(__half(*this));
745
+ }
746
+ /**
747
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
748
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
749
+ * Clamps negative and too large inputs to the output range.
750
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
751
+ */
752
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
753
+ return __half2ull_rz(__half(*this));
754
+ }
755
+
756
+ /**
757
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
758
+ * Conversion operator to \p signed \p char data type.
759
+ * Clamps too large inputs to the output range.
760
+ * \p NaN inputs convert to \p zero.
761
+ */
762
+ explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
763
+ signed char i;
764
+ const float f = float(*this);
765
+ const signed char max_val = (signed char)0x7FU;
766
+ const signed char min_val = (signed char)0x80U;
767
+ const unsigned char bits = (*this).__x;
768
+ // saturation fixup
769
+ if ((bits & 0x7FU) > 0x7CU) {
770
+ // NaN
771
+ i = 0;
772
+ } else if (f > static_cast<float>(max_val)) {
773
+ // saturate maximum
774
+ i = max_val;
775
+ } else if (f < static_cast<float>(min_val)) {
776
+ // saturate minimum
777
+ i = min_val;
778
+ } else {
779
+ // normal value
780
+ i = static_cast<signed char>(f);
781
+ }
782
+ return i;
783
+ }
784
+ /**
785
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
786
+ * Conversion operator to \p short \p int data type.
787
+ * Clamps too large inputs to the output range.
788
+ * \p NaN inputs convert to \p zero.
789
+ */
790
+ explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
791
+ return __half2short_rz(__half(*this));
792
+ }
793
+ /**
794
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
795
+ * Conversion operator to \p int data type.
796
+ * Clamps too large inputs to the output range.
797
+ * \p NaN inputs convert to \p zero.
798
+ */
799
+ explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
800
+ return __half2int_rz(__half(*this));
801
+ }
802
+ /**
803
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
804
+ * Conversion operator to \p long \p long \p int data type.
805
+ * Clamps too large inputs to the output range.
806
+ * \p NaN inputs convert to \p 0x8000000000000000LL.
807
+ */
808
+ explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
809
+ return __half2ll_rz(__half(*this));
810
+ }
811
+
812
+ /**
813
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
814
+ * Conversion operator to \p bool data type.
815
+ * +0 and -0 inputs convert to \p false.
816
+ * Non-zero inputs convert to \p true.
817
+ */
818
+ explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
819
+ return (__x & 0x7FU) != 0U;
820
+ }
821
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
822
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
823
+ };
824
+
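A brief sketch of the struct in use (illustrative only; assumes C++11 or newer so the explicit conversion operators above are compiled in):

    #include <cuda_fp8.h>
    #include <cstdio>

    int main() {
        __nv_fp8_e5m2 a(3.14159f);             // float constructor uses __NV_SATFINITE
        float back = static_cast<float>(a);    // operators are explicit, so a cast is required
        __nv_fp8_e5m2 big(1.0e9);              // far above the E5M2 maximum of 57344: clamps, never Inf
        std::printf("%g %g\n", back, static_cast<double>(big));  // prints 3 and 57344
        return 0;
    }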
825
+ /**
826
+ * \defgroup CUDA_MATH_FP8X2_E5M2_STRUCT C++ struct for handling vector type of two fp8 values of e5m2 kind.
827
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
828
+ */
829
+
830
+ /**
831
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
832
+ * \brief __nv_fp8x2_e5m2 datatype
833
+ *
834
+ * \details This structure implements the datatype for handling two
835
+ * \p fp8 floating-point numbers of \p e5m2 kind each:
836
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
837
+ *
838
+ * The structure implements converting constructors and operators.
839
+ */
840
+ struct __CUDA_ALIGN__(2) __nv_fp8x2_e5m2 {
841
+ public:
842
+ /**
843
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
844
+ * Storage variable contains the vector of two \p fp8 floating-point data
845
+ * values.
846
+ */
847
+ __nv_fp8x2_storage_t __x;
848
+
849
+ /**
850
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
851
+ * Constructor by default.
852
+ */
853
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
854
+ __nv_fp8x2_e5m2() = default;
855
+ #else
856
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2() {}
857
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
858
+
859
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
860
+
861
+ /* Construct from wider types */
862
+
863
+ /**
864
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
865
+ * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
866
+ * behavior for out-of-range values.
867
+ */
868
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __half2 f) {
869
+ __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
870
+ __NV_SATFINITE, __NV_E5M2);
871
+ }
872
+ /**
873
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
874
+ * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
875
+ * behavior for out-of-range values.
876
+ */
877
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __nv_bfloat162 f) {
878
+ __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
879
+ __NV_SATFINITE, __NV_E5M2);
880
+ }
881
+ /**
882
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
883
+ * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
884
+ * behavior for out-of-range values.
885
+ */
886
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const float2 f) {
887
+ __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
888
+ }
889
+ /**
890
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
891
+ * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
892
+ * behavior for out-of-range values.
893
+ */
894
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const double2 f) {
895
+ __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
896
+ }
897
+
898
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
899
+ /* Widening converts */
900
+ /**
901
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
902
+ * Conversion operator to \p __half2 data type.
903
+ */
904
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
905
+ return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
906
+ }
907
+ /**
908
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
909
+ * Conversion operator to \p float2 data type.
910
+ */
911
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
912
+ return __internal_halfraw2_to_float2(
913
+ __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
914
+ }
915
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
916
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
917
+ };
918
+
919
+ __CUDA_HOSTDEVICE_FP8_DECL__ unsigned int
920
+ __internal_pack_u16x2_to_u32(const unsigned short int src_lo,
921
+ const unsigned short int src_hi) {
922
+ unsigned int dst;
923
+ #if (defined __CUDACC__) && (defined __CUDA_ARCH__)
924
+ asm("{ mov.b32 %0, {%1,%2};}\n" : "=r"(dst) : "h"(src_lo), "h"(src_hi));
925
+ #else
926
+ dst = (static_cast<unsigned int>(src_hi) << 16U) |
927
+ static_cast<unsigned int>(src_lo);
928
+ #endif
929
+ return dst;
930
+ }
931
+
932
+ /**
933
+ * \defgroup CUDA_MATH_FP8X4_E5M2_STRUCT C++ struct for handling vector type of four fp8 values of e5m2 kind.
934
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
935
+ */
936
+
937
+ /**
938
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
939
+ * \brief __nv_fp8x4_e5m2 datatype
940
+ *
941
+ * \details This structure implements the datatype for handling four
942
+ * \p fp8 floating-point numbers of \p e5m2 kind each:
943
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
944
+ *
945
+ * The structure implements converting constructors and operators.
946
+ */
947
+ struct __CUDA_ALIGN__(4) __nv_fp8x4_e5m2 {
948
+ public:
949
+ /**
950
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
951
+ * Storage variable contains the vector of four \p fp8 floating-point data
952
+ * values.
953
+ */
954
+ __nv_fp8x4_storage_t __x;
955
+
956
+ /**
957
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
958
+ * Constructor by default.
959
+ */
960
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
961
+ __nv_fp8x4_e5m2() = default;
962
+ #else
963
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2() {}
964
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
965
+
966
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
967
+
968
+ /* Construct from wider types */
969
+
970
+ /**
971
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
972
+ * Constructor from a pair of \p __half2 data type values,
973
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
974
+ */
975
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __half2 flo,
976
+ const __half2 fhi) {
977
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
978
+ static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E5M2);
979
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
980
+ static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
981
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
982
+ }
983
+ /**
984
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
985
+ * Constructor from a pair of \p __nv_bfloat162 data type values,
986
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
987
+ */
988
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __nv_bfloat162 flo,
989
+ const __nv_bfloat162 fhi) {
990
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
991
+ static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E5M2);
992
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
993
+ static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
994
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
995
+ }
996
+ /**
997
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
998
+ * Constructor from \p float4 vector data type,
999
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1000
+ */
1001
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const float4 f) {
1002
+ const float2 flo = {f.x, f.y};
1003
+ const float2 fhi = {f.z, f.w};
1004
+ const __nv_fp8x2_storage_t rlo =
1005
+ __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
1006
+ const __nv_fp8x2_storage_t rhi =
1007
+ __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
1008
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1009
+ }
1010
+ /**
1011
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
1012
+ * Constructor from \p double4 vector data type,
1013
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1014
+ */
1015
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const double4 f) {
1016
+ const double2 flo = {f.x, f.y};
1017
+ const double2 fhi = {f.z, f.w};
1018
+ const __nv_fp8x2_storage_t rlo =
1019
+ __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
1020
+ const __nv_fp8x2_storage_t rhi =
1021
+ __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
1022
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1023
+ }
1024
+
1025
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1026
+ /* Widening converts */
1027
+
1028
+ /**
1029
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
1030
+ * Conversion operator to \p float4 vector data type.
1031
+ */
1032
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
1033
+ const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
1034
+ const __nv_fp8x2_storage_t shi =
1035
+ static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
1036
+ float2 rlo = __internal_halfraw2_to_float2(
1037
+ __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E5M2));
1038
+ float2 rhi = __internal_halfraw2_to_float2(
1039
+ __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E5M2));
1040
+ float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
1041
+ return res;
1042
+ }
1043
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1044
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1045
+ };
1046
+
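A packing sketch for the four-wide type (editor-added): the constructor above places the conversions of .x/.y in the low 16 bits of the storage and .z/.w in the high 16 bits. vector_types.h is included as an assumption for host-only builds.

    #include <cuda_fp8.h>
    #include <vector_types.h>
    #include <cstdio>

    int main() {
        float4 v; v.x = 1.0f; v.y = 2.0f; v.z = 4.0f; v.w = 8.0f;
        __nv_fp8x4_e5m2 q(v);
        float4 r = static_cast<float4>(q);     // powers of two survive the round trip exactly
        std::printf("%g %g %g %g\n", r.x, r.y, r.z, r.w);
        return 0;
    }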
1047
+ /**
1048
+ * \defgroup CUDA_MATH_FP8_E4M3_STRUCT C++ struct for handling fp8 data type of e4m3 kind.
1049
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1050
+ */
1051
+
1052
+ /**
1053
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1054
+ * \brief __nv_fp8_e4m3 datatype
1055
+ *
1056
+ * \details This structure implements the datatype for storing
1057
+ * \p fp8 floating-point numbers of \p e4m3 kind:
1058
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1059
+ * The encoding doesn't support Infinity.
1060
+ * NaNs are limited to 0x7F and 0xFF values.
1061
+ *
1062
+ * The structure implements converting constructors and operators.
1063
+ */
1064
+ struct __CUDA_ALIGN__(1) __nv_fp8_e4m3 {
1065
+ public:
1066
+ /**
1067
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1068
+ * Storage variable contains the \p fp8 floating-point data.
1069
+ */
1070
+ __nv_fp8_storage_t __x;
1071
+
1072
+ /**
1073
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1074
+ * Constructor by default.
1075
+ */
1076
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1077
+ __nv_fp8_e4m3() = default;
1078
+ #else
1079
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3() {}
1080
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1081
+
1082
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1083
+
1084
+ /* Construct from wider FP types */
1085
+ /* Note we do avoid constructor init-list because of special host/device
1086
+ * compilation rules */
1087
+
1088
+ /**
1089
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1090
+ * Constructor from \p __half data type, relies on \p __NV_SATFINITE
1091
+ * behavior for out-of-range values.
1092
+ */
1093
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __half f) {
1094
+ __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
1095
+ __NV_SATFINITE, __NV_E4M3);
1096
+ }
1097
+ /**
1098
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1099
+ * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
1100
+ * behavior for out-of-range values.
1101
+ */
1102
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __nv_bfloat16 f) {
1103
+ __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
1104
+ __NV_SATFINITE, __NV_E4M3);
1105
+ }
1106
+ /**
1107
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1108
+ * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
1109
+ * for out-of-range values.
1110
+ */
1111
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const float f) {
1112
+ __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
1113
+ }
1114
+ /**
1115
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1116
+ * Constructor from \p double data type, relies on \p __NV_SATFINITE
1117
+ * behavior for out-of-range values.
1118
+ */
1119
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const double f) {
1120
+ __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
1121
+ }
1122
+
1123
+ /* Converts from integral */
1124
+
1125
+ /**
1126
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1127
+ * Constructor from \p unsigned \p short \p int data type, relies on \p
1128
+ * __NV_SATFINITE behavior for out-of-range values.
1129
+ */
1130
+ explicit __CUDA_HOSTDEVICE_FP8__
1131
+ __nv_fp8_e4m3(const unsigned short int val) {
1132
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1133
+ }
1134
+ /**
1135
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1136
+ * Constructor from \p unsigned \p int data type, relies on \p
1137
+ * __NV_SATFINITE behavior for out-of-range values.
1138
+ */
1139
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const unsigned int val) {
1140
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1141
+ }
1142
+ /**
1143
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1144
+ * Constructor from \p unsigned \p long \p long \p int data type, relies on
1145
+ * \p __NV_SATFINITE behavior for out-of-range values.
1146
+ */
1147
+ explicit __CUDA_HOSTDEVICE_FP8__
1148
+ __nv_fp8_e4m3(const unsigned long long int val) {
1149
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1150
+ }
1151
+
1152
+ /**
1153
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1154
+ * Constructor from \p short \p int data type, relies on \p
1155
+ * __NV_SATFINITE behavior for out-of-range values.
1156
+ */
1157
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const short int val) {
1158
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1159
+ }
1160
+ /**
1161
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1162
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
1163
+ * for out-of-range values.
1164
+ */
1165
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const int val) {
1166
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1167
+ }
1168
+ /**
1169
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1170
+ * Constructor from \p long \p long \p int data type, relies on \p
1171
+ * __NV_SATFINITE behavior for out-of-range values.
1172
+ */
1173
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const long long int val) {
1174
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1175
+ }
1176
+
1177
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1178
+ /* Widening FP converts */
1179
+ /**
1180
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1181
+ * Conversion operator to \p __half data type.
1182
+ */
1183
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
1184
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
1185
+ }
1186
+ /**
1187
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1188
+ * Conversion operator to \p float data type.
1189
+ */
1190
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
1191
+ return __internal_halfraw_to_float(
1192
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
1193
+ }
1194
+ /**
1195
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1196
+ * Conversion operator to \p __nv_bfloat16 data type.
1197
+ */
1198
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
1199
+ return static_cast<__nv_bfloat16>(
1200
+ __internal_float_to_bf16raw_rz(float(*this)));
1201
+ }
1202
+ /**
1203
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1204
+ * Conversion operator to \p double data type.
1205
+ */
1206
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
1207
+ return static_cast<double>(float(*this));
1208
+ }
1209
+
1210
+ /* Convert to integral */
1211
+
1212
+ /**
1213
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1214
+ * Conversion operator to \p unsigned \p char data type.
1215
+ * Clamps negative and too large inputs to the output range.
1216
+ * \p NaN inputs convert to \p zero.
1217
+ */
1218
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
1219
+ unsigned char i;
1220
+ const float f = float(*this);
1221
+ const unsigned char max_val = 0xFFU;
1222
+ const unsigned char min_val = 0U;
1223
+ const unsigned char bits = (*this).__x;
1224
+ // saturation fixup
1225
+ if ((bits & 0x7FU) == 0x7FU) {
1226
+ // NaN
1227
+ i = 0;
1228
+ } else if (f > static_cast<float>(max_val)) {
1229
+ // saturate maximum
1230
+ i = max_val;
1231
+ } else if (f < static_cast<float>(min_val)) {
1232
+ // saturate minimum
1233
+ i = min_val;
1234
+ } else {
1235
+ // normal value
1236
+ i = static_cast<unsigned char>(f);
1237
+ }
1238
+ return i;
1239
+ }
1240
+
1241
+ /**
1242
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1243
+ * Conversion operator to \p unsigned \p short \p int data type.
1244
+ * Clamps negative inputs to zero.
1245
+ * \p NaN inputs convert to \p zero.
1246
+ */
1247
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
1248
+ return __half2ushort_rz(__half(*this));
1249
+ }
1250
+ /**
1251
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1252
+ * Conversion operator to \p unsigned \p int data type.
1253
+ * Clamps negative inputs to zero.
1254
+ * \p NaN inputs convert to \p zero.
1255
+ */
1256
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
1257
+ return __half2uint_rz(__half(*this));
1258
+ }
1259
+ /**
1260
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1261
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
1262
+ * Clamps negative inputs to zero.
1263
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
1264
+ */
1265
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
1266
+ return __half2ull_rz(__half(*this));
1267
+ }
1268
+
1269
+ /**
1270
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1271
+ * Conversion operator to \p signed \p char data type.
1272
+ * Clamps too large inputs to the output range.
1273
+ * \p NaN inputs convert to \p zero.
1274
+ */
1275
+ explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
1276
+ signed char i;
1277
+ const float f = float(*this);
1278
+ const signed char max_val = (signed char)0x7FU;
1279
+ const signed char min_val = (signed char)0x80U;
1280
+ const unsigned char bits = (*this).__x;
1281
+ // saturation fixup
1282
+ if ((bits & 0x7FU) == 0x7FU) {
1283
+ // NaN
1284
+ i = 0;
1285
+ } else if (f > static_cast<float>(max_val)) {
1286
+ // saturate maximum
1287
+ i = max_val;
1288
+ } else if (f < static_cast<float>(min_val)) {
1289
+ // saturate minimum
1290
+ i = min_val;
1291
+ } else {
1292
+ // normal value
1293
+ i = static_cast<signed char>(f);
1294
+ }
1295
+ return i;
1296
+ }
1297
+ /**
1298
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1299
+ * Conversion operator to \p short \p int data type.
1300
+ * \p NaN inputs convert to \p zero.
1301
+ */
1302
+ explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
1303
+ return __half2short_rz(__half(*this));
1304
+ }
1305
+ /**
1306
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1307
+ * Conversion operator to \p int data type.
1308
+ * \p NaN inputs convert to \p zero.
1309
+ */
1310
+ explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
1311
+ return __half2int_rz(__half(*this));
1312
+ }
1313
+ /**
1314
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1315
+ * Conversion operator to \p long \p long \p int data type.
1316
+ * \p NaN inputs convert to \p 0x8000000000000000LL.
1317
+ */
1318
+ explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
1319
+ return __half2ll_rz(__half(*this));
1320
+ }
1321
+
1322
+ /**
1323
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1324
+ * Conversion operator to \p bool data type.
1325
+ * +0 and -0 inputs convert to \p false.
1326
+ * Non-zero inputs convert to \p true.
1327
+ */
1328
+ explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
1329
+ return (__x & 0x7FU) != 0U;
1330
+ }
1331
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1332
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1333
+ };
1334
+
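A sketch of the E4M3-specific behavior documented above: no Inf, a single NaN encoding per sign, and a maximum finite value of 448. This is an illustrative example; std::nanf is used only to produce a NaN input.

    #include <cuda_fp8.h>
    #include <cmath>
    #include <cstdio>

    int main() {
        __nv_fp8_e4m3 x(500.0f);               // above 448: saturates to 448 (bits 0x7E)
        __nv_fp8_e4m3 n(std::nanf(""));        // NaN maps to the canonical encoding 0x7F
        std::printf("%g  nan bits = 0x%02X\n", static_cast<float>(x), (unsigned)n.__x);
        return 0;
    }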
1335
+ /**
1336
+ * \defgroup CUDA_MATH_FP8X2_E4M3_STRUCT C++ struct for handling vector type of two fp8 values of e4m3 kind.
1337
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1338
+ */
1339
+
1340
+ /**
1341
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1342
+ * \brief __nv_fp8x2_e4m3 datatype
1343
+ *
1344
+ * \details This structure implements the datatype for storage
1345
+ * and operations on the vector of two \p fp8 values of \p e4m3 kind each:
1346
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1347
+ * The encoding doesn't support Infinity.
1348
+ * NaNs are limited to 0x7F and 0xFF values.
1349
+ */
1350
+ struct __CUDA_ALIGN__(2) __nv_fp8x2_e4m3 {
1351
+ public:
1352
+ /**
1353
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1354
+ * Storage variable contains the vector of two \p fp8 floating-point data
1355
+ * values.
1356
+ */
1357
+ __nv_fp8x2_storage_t __x;
1358
+
1359
+ /**
1360
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1361
+ * Constructor by default.
1362
+ */
1363
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1364
+ __nv_fp8x2_e4m3() = default;
1365
+ #else
1366
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3() {}
1367
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1368
+
1369
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1370
+
1371
+ /* Construct from wider types */
1372
+
1373
+ /**
1374
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1375
+ * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
1376
+ * behavior for out-of-range values.
1377
+ */
1378
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __half2 f) {
1379
+ __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
1380
+ __NV_SATFINITE, __NV_E4M3);
1381
+ }
1382
+ /**
1383
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1384
+ * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
1385
+ * behavior for out-of-range values.
1386
+ */
1387
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __nv_bfloat162 f) {
1388
+ __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
1389
+ __NV_SATFINITE, __NV_E4M3);
1390
+ }
1391
+ /**
1392
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1393
+ * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
1394
+ * behavior for out-of-range values.
1395
+ */
1396
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const float2 f) {
1397
+ __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
1398
+ }
1399
+ /**
1400
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1401
+ * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
1402
+ * behavior for out-of-range values.
1403
+ */
1404
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const double2 f) {
1405
+ __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
1406
+ }
1407
+
1408
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1409
+ /* Widening converts */
1410
+ /**
1411
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1412
+ * Conversion operator to \p __half2 data type.
1413
+ */
1414
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
1415
+ return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
1416
+ }
1417
+ /**
1418
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1419
+ * Conversion operator to \p float2 data type.
1420
+ */
1421
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
1422
+ return __internal_halfraw2_to_float2(
1423
+ __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
1424
+ }
1425
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1426
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1427
+ };
1428
+
1429
+ /**
1430
+ * \defgroup CUDA_MATH_FP8X4_E4M3_STRUCT C++ struct for handling vector type of four fp8 values of e4m3 kind.
1431
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1432
+ */
1433
+
1434
+ /**
1435
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1436
+ * \brief __nv_fp8x4_e4m3 datatype
1437
+ *
1438
+ * \details This structure implements the datatype for storage
1439
+ * and operations on the vector of four \p fp8 values of \p e4m3 kind each:
1440
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1441
+ * The encoding doesn't support Infinity.
1442
+ * NaNs are limited to 0x7F and 0xFF values.
1443
+ */
1444
+ struct __CUDA_ALIGN__(4) __nv_fp8x4_e4m3 {
1445
+ public:
1446
+ /**
1447
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1448
+ * Storage variable contains the vector of four \p fp8 floating-point data
1449
+ * values.
1450
+ */
1451
+ __nv_fp8x4_storage_t __x;
1452
+
1453
+ /**
1454
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1455
+ * Constructor by default.
1456
+ */
1457
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1458
+ __nv_fp8x4_e4m3() = default;
1459
+ #else
1460
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3() {}
1461
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1462
+
1463
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1464
+
1465
+ /* Construct from wider types */
1466
+
1467
+ /**
1468
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1469
+ * Constructor from a pair of \p __half2 data type values,
1470
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1471
+ */
1472
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __half2 flo,
1473
+ const __half2 fhi) {
1474
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
1475
+ static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E4M3);
1476
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
1477
+ static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
1478
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1479
+ }
1480
+ /**
1481
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1482
+ * Constructor from a pair of \p __nv_bfloat162 data type values,
1483
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1484
+ */
1485
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __nv_bfloat162 flo,
1486
+ const __nv_bfloat162 fhi) {
1487
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
1488
+ static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E4M3);
1489
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
1490
+ static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
1491
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1492
+ }
1493
+ /**
1494
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1495
+ * Constructor from \p float4 vector data type,
1496
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1497
+ */
1498
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const float4 f) {
1499
+ const float2 flo = {f.x, f.y};
1500
+ const float2 fhi = {f.z, f.w};
1501
+ const __nv_fp8x2_storage_t rlo =
1502
+ __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
1503
+ const __nv_fp8x2_storage_t rhi =
1504
+ __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
1505
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1506
+ }
1507
+ /**
1508
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1509
+ * Constructor from \p double4 vector data type,
1510
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1511
+ */
1512
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const double4 f) {
1513
+ const double2 flo = {f.x, f.y};
1514
+ const double2 fhi = {f.z, f.w};
1515
+ const __nv_fp8x2_storage_t rlo =
1516
+ __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
1517
+ const __nv_fp8x2_storage_t rhi =
1518
+ __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
1519
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1520
+ }
1521
+
1522
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1523
+ /* Widening converts */
1524
+
1525
+ /**
1526
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1527
+ * Conversion operator to \p float4 vector data type.
1528
+ */
1529
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
1530
+ const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
1531
+ const __nv_fp8x2_storage_t shi =
1532
+ static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
1533
+ float2 rlo = __internal_halfraw2_to_float2(
1534
+ __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E4M3));
1535
+ float2 rhi = __internal_halfraw2_to_float2(
1536
+ __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E4M3));
1537
+ float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
1538
+ return res;
1539
+ }
1540
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1541
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1542
+ };
1543
+
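And a final sketch for the four-wide E4M3 type, constructed from double4 (same include-path assumptions as the earlier sketches; each lane goes through the double2 converter with __NV_SATFINITE):

    #include <cuda_fp8.h>
    #include <vector_types.h>
    #include <cstdio>

    int main() {
        double4 v; v.x = 1.0; v.y = 240.0; v.z = 448.0; v.w = 1024.0;
        __nv_fp8x4_e4m3 q(v);                  // .w exceeds 448 and saturates to 448
        float4 r = static_cast<float4>(q);
        std::printf("%g %g %g %g\n", r.x, r.y, r.z, r.w);
        return 0;
    }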
1544
+ #endif /* defined(__cplusplus) */
1545
+
1546
+ #endif /* end of include guard: __CUDA_FP8_HPP__ */
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h ADDED
@@ -0,0 +1,514 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_GL_INTEROP_H__)
51
+ #define __CUDA_GL_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+
55
+ #if defined(__APPLE__)
56
+
57
+ #include <OpenGL/gl.h>
58
+
59
+ #else /* __APPLE__ */
60
+
61
+ #if defined(__arm__) || defined(__aarch64__)
62
+ #ifndef GL_VERSION
63
+ #error Please include the appropriate gl headers before including cuda_gl_interop.h
64
+ #endif
65
+ #else
66
+ #include <GL/gl.h>
67
+ #endif
68
+
69
+ #endif /* __APPLE__ */
70
+
71
+ /** \cond impl_private */
72
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
73
+ #define __CUDA_DEPRECATED
74
+ #elif defined(_MSC_VER)
75
+ #define __CUDA_DEPRECATED __declspec(deprecated)
76
+ #elif defined(__GNUC__)
77
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
78
+ #else
79
+ #define __CUDA_DEPRECATED
80
+ #endif
81
+ /** \endcond impl_private */
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif /* __cplusplus */
86
+
87
+ /**
88
+ * \addtogroup CUDART_OPENGL OpenGL Interoperability
89
+ * This section describes the OpenGL interoperability functions of the CUDA
90
+ * runtime application programming interface. Note that mapping of OpenGL
91
+ * resources is performed with the graphics API agnostic, resource mapping
92
+ * interface described in \ref CUDART_INTEROP "Graphics Interoperability".
93
+ *
94
+ * @{
95
+ */
96
+
97
+ /**
98
+ * CUDA devices corresponding to the current OpenGL context
99
+ */
100
+ enum cudaGLDeviceList
101
+ {
102
+ cudaGLDeviceListAll = 1, /**< The CUDA devices for all GPUs used by the current OpenGL context */
103
+ cudaGLDeviceListCurrentFrame = 2, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
104
+ cudaGLDeviceListNextFrame = 3 /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
105
+ };
106
+
107
+ /**
108
+ * \brief Gets the CUDA devices associated with the current OpenGL context
109
+ *
110
+ * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
111
+ * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
112
+ * at most \p cudaDeviceCount of the CUDA-compatible devices corresponding to
113
+ * the current OpenGL context. If any of the GPUs being used by the current OpenGL
114
+ * context are not CUDA capable then the call will return ::cudaErrorNoDevice.
115
+ *
116
+ * \param pCudaDeviceCount - Returned number of CUDA devices corresponding to the
117
+ * current OpenGL context
118
+ * \param pCudaDevices - Returned CUDA devices corresponding to the current
119
+ * OpenGL context
120
+ * \param cudaDeviceCount - The size of the output device array \p pCudaDevices
121
+ * \param deviceList - The set of devices to return. This set may be
122
+ * ::cudaGLDeviceListAll for all devices,
123
+ * ::cudaGLDeviceListCurrentFrame for the devices used to
124
+ * render the current frame (in SLI), or
125
+ * ::cudaGLDeviceListNextFrame for the devices used to
126
+ * render the next frame (in SLI).
127
+ *
128
+ * \return
129
+ * ::cudaSuccess,
130
+ * ::cudaErrorNoDevice,
131
+ * ::cudaErrorInvalidGraphicsContext,
132
+ * ::cudaErrorOperatingSystem,
133
+ * ::cudaErrorUnknown
134
+ *
135
+ * \note This function is not supported on Mac OS X.
136
+ * \notefnerr
137
+ *
138
+ * \sa
139
+ * ::cudaGraphicsUnregisterResource,
140
+ * ::cudaGraphicsMapResources,
141
+ * ::cudaGraphicsSubResourceGetMappedArray,
142
+ * ::cudaGraphicsResourceGetMappedPointer,
143
+ * ::cuGLGetDevices
144
+ */
145
+ extern __host__ cudaError_t CUDARTAPI cudaGLGetDevices(unsigned int *pCudaDeviceCount, int *pCudaDevices, unsigned int cudaDeviceCount, enum cudaGLDeviceList deviceList);
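A minimal usage sketch follows; it assumes the application has already created and made current an OpenGL context, and that cuda_gl_interop.h is included after the GL headers.

#include <stdio.h>
#include <cuda_gl_interop.h>

/* Hedged sketch: enumerate the CUDA devices behind the current GL context. */
static void list_gl_devices(void)
{
    int devices[8];
    unsigned int count = 0;
    if (cudaGLGetDevices(&count, devices, 8, cudaGLDeviceListAll) == cudaSuccess) {
        for (unsigned int i = 0; i < count; ++i) {
            /* devices[i] is an ordinary CUDA ordinal, usable with cudaSetDevice(). */
            printf("GL context uses CUDA device %d\n", devices[i]);
        }
    }
}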
146
+
147
+ /**
148
+ * \brief Register an OpenGL texture or renderbuffer object
149
+ *
150
+ * Registers the texture or renderbuffer object specified by \p image for access by CUDA.
151
+ * A handle to the registered object is returned as \p resource.
152
+ *
153
+ * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
154
+ * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
155
+ * or ::GL_RENDERBUFFER.
156
+ *
157
+ * The register flags \p flags specify the intended usage, as follows:
158
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
159
+ * resource will be used. It is therefore assumed that this resource will be
160
+ * read from and written to by CUDA. This is the default value.
161
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
162
+ * will not write to this resource.
163
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
164
+ * CUDA will not read from this resource and will write over the
165
+ * entire contents of the resource, so none of the data previously
166
+ * stored in the resource will be preserved.
167
+ * - ::cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will
168
+ * bind this resource to a surface reference.
169
+ * - ::cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform
170
+ * texture gather operations on this resource.
171
+ *
172
+ * The following image formats are supported. For brevity's sake, the list is abbreviated.
173
+ * For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
174
+ * {GL_R8, GL_R16, GL_RG8, GL_RG16} :
175
+ * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
176
+ * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
177
+ * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
178
+ * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
179
+ *
180
+ * The following image classes are currently disallowed:
181
+ * - Textures with borders
182
+ * - Multisampled renderbuffers
183
+ *
184
+ * \param resource - Pointer to the returned object handle
185
+ * \param image - name of texture or renderbuffer object to be registered
186
+ * \param target - Identifies the type of object specified by \p image
187
+ * \param flags - Register flags
188
+ *
189
+ * \return
190
+ * ::cudaSuccess,
191
+ * ::cudaErrorInvalidDevice,
192
+ * ::cudaErrorInvalidValue,
193
+ * ::cudaErrorInvalidResourceHandle,
194
+ * ::cudaErrorOperatingSystem,
195
+ * ::cudaErrorUnknown
196
+ * \notefnerr
197
+ *
198
+ * \sa
199
+ * ::cudaGraphicsUnregisterResource,
200
+ * ::cudaGraphicsMapResources,
201
+ * ::cudaGraphicsSubResourceGetMappedArray,
202
+ * ::cuGraphicsGLRegisterImage
203
+ */
204
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterImage(struct cudaGraphicsResource **resource, GLuint image, GLenum target, unsigned int flags);
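As a hedged sketch (tex is assumed to name an existing GL_TEXTURE_2D with one of the supported formats listed above):

#include <cuda_gl_interop.h>

/* Sketch: register an existing texture so CUDA can read it as an array. */
static cudaGraphicsResource_t register_texture(GLuint tex)
{
    struct cudaGraphicsResource *res = NULL;
    cudaError_t err = cudaGraphicsGLRegisterImage(&res, tex, GL_TEXTURE_2D,
                                                  cudaGraphicsRegisterFlagsReadOnly);
    /* Map later with cudaGraphicsMapResources() and fetch the backing array
       with cudaGraphicsSubResourceGetMappedArray(). */
    return (err == cudaSuccess) ? res : NULL;
}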
205
+
206
+ /**
207
+ * \brief Registers an OpenGL buffer object
208
+ *
209
+ * Registers the buffer object specified by \p buffer for access by
210
+ * CUDA. A handle to the registered object is returned as \p
211
+ * resource. The register flags \p flags specify the intended usage,
212
+ * as follows:
213
+ *
214
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
215
+ * resource will be used. It is therefore assumed that this resource will be
216
+ * read from and written to by CUDA. This is the default value.
217
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
218
+ * will not write to this resource.
219
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
220
+ * CUDA will not read from this resource and will write over the
221
+ * entire contents of the resource, so none of the data previously
222
+ * stored in the resource will be preserved.
223
+ *
224
+ * \param resource - Pointer to the returned object handle
225
+ * \param buffer - name of buffer object to be registered
226
+ * \param flags - Register flags
227
+ *
228
+ * \return
229
+ * ::cudaSuccess,
230
+ * ::cudaErrorInvalidDevice,
231
+ * ::cudaErrorInvalidValue,
232
+ * ::cudaErrorInvalidResourceHandle,
233
+ * ::cudaErrorOperatingSystem,
234
+ * ::cudaErrorUnknown
235
+ * \notefnerr
236
+ *
237
+ * \sa
238
+ * ::cudaGraphicsUnregisterResource,
239
+ * ::cudaGraphicsMapResources,
240
+ * ::cudaGraphicsResourceGetMappedPointer,
241
+ * ::cuGraphicsGLRegisterBuffer
242
+ */
243
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterBuffer(struct cudaGraphicsResource **resource, GLuint buffer, unsigned int flags);
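A minimal sketch of the recommended flow for buffers, assuming vbo names an existing GL buffer object: register once at startup, then map and unmap around each burst of CUDA work.

#include <cuda_gl_interop.h>

/* One-time setup (vbo is an existing GL buffer object; assumption):
     cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsRegisterFlagsWriteDiscard); */

/* Sketch: let CUDA kernels fill the registered buffer each frame. */
static void fill_buffer_with_cuda(struct cudaGraphicsResource *res, cudaStream_t stream)
{
    void  *devPtr = NULL;
    size_t nbytes = 0;
    cudaGraphicsMapResources(1, &res, stream);
    cudaGraphicsResourceGetMappedPointer(&devPtr, &nbytes, res);
    /* ... launch kernels that write devPtr (nbytes bytes) here ... */
    cudaGraphicsUnmapResources(1, &res, stream);
}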
244
+
245
+ #ifdef _WIN32
246
+ #ifndef WGL_NV_gpu_affinity
247
+ typedef void* HGPUNV;
248
+ #endif
249
+
250
+ /**
251
+ * \brief Gets the CUDA device associated with hGpu
252
+ *
253
+ * Returns the CUDA device associated with \p hGpu, if applicable.
254
+ *
255
+ * \param device - Returns the device associated with hGpu, or -1 if hGpu is
256
+ * not a compute device.
257
+ * \param hGpu - Handle to a GPU, as queried via WGL_NV_gpu_affinity
258
+ *
259
+ * \return
260
+ * ::cudaSuccess
261
+ * \notefnerr
262
+ *
263
+ * \sa
264
+ * ::WGL_NV_gpu_affinity,
265
+ * ::cuWGLGetDevice
266
+ */
267
+ extern __host__ cudaError_t CUDARTAPI cudaWGLGetDevice(int *device, HGPUNV hGpu);
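A short Windows-only sketch; hGpu is assumed to have been obtained from the WGL_NV_gpu_affinity extension (for example via wglEnumGpusNV).

/* Sketch: pick the CUDA device that drives a particular affinity GPU. */
static int select_cuda_device_for_gpu(HGPUNV hGpu)
{
    int dev = -1;
    if (cudaWGLGetDevice(&dev, hGpu) == cudaSuccess && dev >= 0) {
        cudaSetDevice(dev);
    }
    return dev;   /* -1 if hGpu is not a compute device */
}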
268
+ #endif
269
+
270
+ /** @} */ /* END CUDART_OPENGL */
271
+
272
+ /**
273
+ * \addtogroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED]
274
+ * This section describes deprecated OpenGL interoperability functionality.
275
+ *
276
+ * @{
277
+ */
278
+
279
+ /**
280
+ * CUDA GL Map Flags
281
+ */
282
+ enum cudaGLMapFlags
283
+ {
284
+ cudaGLMapFlagsNone = 0, /**< Default; Assume resource can be read/written */
285
+ cudaGLMapFlagsReadOnly = 1, /**< CUDA kernels will not write to this resource */
286
+ cudaGLMapFlagsWriteDiscard = 2 /**< CUDA kernels will only write to and will not read from this resource */
287
+ };
288
+
289
+ /**
290
+ * \brief Sets a CUDA device to use OpenGL interoperability
291
+ *
292
+ * \deprecated This function is deprecated as of CUDA 5.0.
293
+ *
294
+ * This function is deprecated and should no longer be used. It is
295
+ * no longer necessary to associate a CUDA device with an OpenGL
296
+ * context in order to achieve maximum interoperability performance.
297
+ *
298
+ * This function will immediately initialize the primary context on
299
+ * \p device if needed.
300
+ *
301
+ * \param device - Device to use for OpenGL interoperability
302
+ *
303
+ * \return
304
+ * ::cudaSuccess,
305
+ * ::cudaErrorInvalidDevice,
306
+ * ::cudaErrorSetOnActiveProcess
307
+ * \notefnerr
308
+ *
309
+ * \sa ::cudaGraphicsGLRegisterBuffer, ::cudaGraphicsGLRegisterImage
310
+ */
311
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetGLDevice(int device);
312
+
313
+ /**
314
+ * \brief Registers a buffer object for access by CUDA
315
+ *
316
+ * \deprecated This function is deprecated as of CUDA 3.0.
317
+ *
318
+ * Registers the buffer object of ID \p bufObj for access by
319
+ * CUDA. This function must be called before CUDA can map the buffer
320
+ * object. The OpenGL context used to create the buffer, or another
321
+ * context from the same share group, must be bound to the current
322
+ * thread when this is called.
323
+ *
324
+ * \param bufObj - Buffer object ID to register
325
+ *
326
+ * \return
327
+ * ::cudaSuccess,
328
+ * ::cudaErrorInitializationError
329
+ * \notefnerr
330
+ *
331
+ * \sa ::cudaGraphicsGLRegisterBuffer
332
+ */
333
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLRegisterBufferObject(GLuint bufObj);
334
+
335
+ /**
336
+ * \brief Maps a buffer object for access by CUDA
337
+ *
338
+ * \deprecated This function is deprecated as of CUDA 3.0.
339
+ *
340
+ * Maps the buffer object of ID \p bufObj into the address space of
341
+ * CUDA and returns in \p *devPtr the base pointer of the resulting
342
+ * mapping. The buffer must have previously been registered by
343
+ * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
344
+ * by CUDA, any OpenGL operation which references the buffer will
345
+ * result in undefined behavior. The OpenGL context used to create
346
+ * the buffer, or another context from the same share group, must be
347
+ * bound to the current thread when this is called.
348
+ *
349
+ * All streams in the current thread are synchronized with the current
350
+ * GL context.
351
+ *
352
+ * \param devPtr - Returned device pointer to CUDA object
353
+ * \param bufObj - Buffer object ID to map
354
+ *
355
+ * \return
356
+ * ::cudaSuccess,
357
+ * ::cudaErrorMapBufferObjectFailed
358
+ * \notefnerr
359
+ *
360
+ * \sa ::cudaGraphicsMapResources
361
+ */
362
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObject(void **devPtr, GLuint bufObj);
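For orientation only, the legacy flow looked roughly as follows; new code should use the cudaGraphicsGLRegisterBuffer / cudaGraphicsMapResources path shown earlier. pbo is assumed to be an existing GL buffer object.

/* Deprecated (pre-CUDA 3.0) buffer-object flow, shown only for reference. */
static void legacy_flow(GLuint pbo)
{
    void *devPtr = NULL;
    cudaGLRegisterBufferObject(pbo);
    cudaGLMapBufferObject(&devPtr, pbo);
    /* ... CUDA kernels read/write devPtr ... */
    cudaGLUnmapBufferObject(pbo);
    cudaGLUnregisterBufferObject(pbo);
}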
363
+
364
+ /**
365
+ * \brief Unmaps a buffer object for access by CUDA
366
+ *
367
+ * \deprecated This function is deprecated as of CUDA 3.0.
368
+ *
369
+ * Unmaps the buffer object of ID \p bufObj for access by CUDA. When
370
+ * a buffer is unmapped, the base address returned by
371
+ * ::cudaGLMapBufferObject() is invalid and subsequent references to
372
+ * the address result in undefined behavior. The OpenGL context used
373
+ * to create the buffer, or another context from the same share group,
374
+ * must be bound to the current thread when this is called.
375
+ *
376
+ * All streams in the current thread are synchronized with the current
377
+ * GL context.
378
+ *
379
+ * \param bufObj - Buffer object to unmap
380
+ *
381
+ * \return
382
+ * ::cudaSuccess,
383
+ * ::cudaErrorUnmapBufferObjectFailed
384
+ * \notefnerr
385
+ *
386
+ * \sa ::cudaGraphicsUnmapResources
387
+ */
388
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObject(GLuint bufObj);
389
+
390
+ /**
391
+ * \brief Unregisters a buffer object for access by CUDA
392
+ *
393
+ * \deprecated This function is deprecated as of CUDA 3.0.
394
+ *
395
+ * Unregisters the buffer object of ID \p bufObj for access by CUDA
396
+ * and releases any CUDA resources associated with the buffer. Once a
397
+ * buffer is unregistered, it may no longer be mapped by CUDA. The GL
398
+ * context used to create the buffer, or another context from the
399
+ * same share group, must be bound to the current thread when this is
400
+ * called.
401
+ *
402
+ * \param bufObj - Buffer object to unregister
403
+ *
404
+ * \return
405
+ * ::cudaSuccess
406
+ * \notefnerr
407
+ *
408
+ * \sa ::cudaGraphicsUnregisterResource
409
+ */
410
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnregisterBufferObject(GLuint bufObj);
411
+
412
+ /**
413
+ * \brief Set usage flags for mapping an OpenGL buffer
414
+ *
415
+ * \deprecated This function is deprecated as of CUDA 3.0.
416
+ *
417
+ * Set flags for mapping the OpenGL buffer \p bufObj
418
+ *
419
+ * Changes to flags will take effect the next time \p bufObj is mapped.
420
+ * The \p flags argument may be any of the following:
421
+ *
422
+ * - ::cudaGLMapFlagsNone: Specifies no hints about how this buffer will
423
+ * be used. It is therefore assumed that this buffer will be read from and
424
+ * written to by CUDA kernels. This is the default value.
425
+ * - ::cudaGLMapFlagsReadOnly: Specifies that CUDA kernels which access this
426
+ * buffer will not write to the buffer.
427
+ * - ::cudaGLMapFlagsWriteDiscard: Specifies that CUDA kernels which access
428
+ * this buffer will not read from the buffer and will write over the
429
+ * entire contents of the buffer, so none of the data previously stored in
430
+ * the buffer will be preserved.
431
+ *
432
+ * If \p bufObj has not been registered for use with CUDA, then
433
+ * ::cudaErrorInvalidResourceHandle is returned. If \p bufObj is presently
434
+ * mapped for access by CUDA, then ::cudaErrorUnknown is returned.
435
+ *
436
+ * \param bufObj - Registered buffer object to set flags for
437
+ * \param flags - Parameters for buffer mapping
438
+ *
439
+ * \return
440
+ * ::cudaSuccess,
441
+ * ::cudaErrorInvalidValue,
442
+ * ::cudaErrorInvalidResourceHandle,
443
+ * ::cudaErrorUnknown
444
+ * \notefnerr
445
+ *
446
+ * \sa ::cudaGraphicsResourceSetMapFlags
447
+ */
448
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetBufferObjectMapFlags(GLuint bufObj, unsigned int flags);
449
+
450
+ /**
451
+ * \brief Maps a buffer object for access by CUDA
452
+ *
453
+ * \deprecated This function is deprecated as of CUDA 3.0.
454
+ *
455
+ * Maps the buffer object of ID \p bufObj into the address space of
456
+ * CUDA and returns in \p *devPtr the base pointer of the resulting
457
+ * mapping. The buffer must have previously been registered by
458
+ * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
459
+ * by CUDA, any OpenGL operation which references the buffer will
460
+ * result in undefined behavior. The OpenGL context used to create
461
+ * the buffer, or another context from the same share group, must be
462
+ * bound to the current thread when this is called.
463
+ *
464
+ * Stream \p stream is synchronized with the current GL context.
465
+ *
466
+ * \param devPtr - Returned device pointer to CUDA object
467
+ * \param bufObj - Buffer object ID to map
468
+ * \param stream - Stream to synchronize
469
+ *
470
+ * \return
471
+ * ::cudaSuccess,
472
+ * ::cudaErrorMapBufferObjectFailed
473
+ * \notefnerr
474
+ *
475
+ * \sa ::cudaGraphicsMapResources
476
+ */
477
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObjectAsync(void **devPtr, GLuint bufObj, cudaStream_t stream);
478
+
479
+ /**
480
+ * \brief Unmaps a buffer object for access by CUDA
481
+ *
482
+ * \deprecated This function is deprecated as of CUDA 3.0.
483
+ *
484
+ * Unmaps the buffer object of ID \p bufObj for access by CUDA. When
485
+ * a buffer is unmapped, the base address returned by
486
+ * ::cudaGLMapBufferObject() is invalid and subsequent references to
487
+ * the address result in undefined behavior. The OpenGL context used
488
+ * to create the buffer, or another context from the same share group,
489
+ * must be bound to the current thread when this is called.
490
+ *
491
+ * Stream \p stream is synchronized with the current GL context.
492
+ *
493
+ * \param bufObj - Buffer object to unmap
494
+ * \param stream - Stream to synchronize
495
+ *
496
+ * \return
497
+ * ::cudaSuccess,
498
+ * ::cudaErrorUnmapBufferObjectFailed
499
+ * \notefnerr
500
+ *
501
+ * \sa ::cudaGraphicsUnmapResources
502
+ */
503
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObjectAsync(GLuint bufObj, cudaStream_t stream);
504
+
505
+ /** @} */ /* END CUDART_OPENGL_DEPRECATED */
506
+
507
+ #if defined(__cplusplus)
508
+ }
509
+ #endif /* __cplusplus */
510
+
511
+ #undef __CUDA_DEPRECATED
512
+
513
+ #endif /* __CUDA_GL_INTEROP_H__ */
514
+
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h ADDED
@@ -0,0 +1,201 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_VDPAU_INTEROP_H__)
51
+ #define __CUDA_VDPAU_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+
55
+ #include <vdpau/vdpau.h>
56
+
57
+ #if defined(__cplusplus)
58
+ extern "C" {
59
+ #endif /* __cplusplus */
60
+
61
+ /**
62
+ * \addtogroup CUDART_VDPAU VDPAU Interoperability
63
+ * This section describes the VDPAU interoperability functions of the CUDA
64
+ * runtime application programming interface.
65
+ *
66
+ * @{
67
+ */
68
+
69
+ /**
70
+ * \brief Gets the CUDA device associated with a VdpDevice.
71
+ *
72
+ * Returns the CUDA device associated with a VdpDevice, if applicable.
73
+ *
74
+ * \param device - Returns the device associated with vdpDevice, or -1 if
75
+ * the device associated with vdpDevice is not a compute device.
76
+ * \param vdpDevice - A VdpDevice handle
77
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
78
+ *
79
+ * \return
80
+ * ::cudaSuccess
81
+ * \notefnerr
82
+ *
83
+ * \sa
84
+ * ::cudaVDPAUSetVDPAUDevice,
85
+ * ::cuVDPAUGetDevice
86
+ */
87
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUGetDevice(int *device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
88
+
89
+ /**
90
+ * \brief Sets a CUDA device to use VDPAU interoperability
91
+ *
92
+ * Records \p vdpDevice as the VdpDevice for VDPAU interoperability
93
+ * with the CUDA device \p device and sets \p device as the current
94
+ * device for the calling host thread.
95
+ *
96
+ * This function will immediately initialize the primary context on
97
+ * \p device if needed.
98
+ *
99
+ * If \p device has already been initialized then this call will fail
100
+ * with the error ::cudaErrorSetOnActiveProcess. In this case it is
101
+ * necessary to reset \p device using ::cudaDeviceReset() before
102
+ * VDPAU interoperability on \p device may be enabled.
103
+ *
104
+ * \param device - Device to use for VDPAU interoperability
105
+ * \param vdpDevice - The VdpDevice to interoperate with
106
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
107
+ *
108
+ * \return
109
+ * ::cudaSuccess,
110
+ * ::cudaErrorInvalidDevice,
111
+ * ::cudaErrorSetOnActiveProcess
112
+ * \notefnerr
113
+ *
114
+ * \sa ::cudaGraphicsVDPAURegisterVideoSurface,
115
+ * ::cudaGraphicsVDPAURegisterOutputSurface,
116
+ * ::cudaDeviceReset
117
+ */
118
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
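A hedged initialization sketch; vdpDevice and vdpGetProcAddress are assumed to come from the application's earlier vdp_device_create_x11() call.

/* Sketch: bind VDPAU interop to the CUDA device backing vdpDevice. */
static int init_vdpau_interop(VdpDevice vdpDevice, VdpGetProcAddress *getProc)
{
    int dev = -1;
    if (cudaVDPAUGetDevice(&dev, vdpDevice, getProc) != cudaSuccess || dev < 0)
        return -1;                           /* not a compute device */
    if (cudaVDPAUSetVDPAUDevice(dev, vdpDevice, getProc) != cudaSuccess)
        return -1;                           /* e.g. cudaErrorSetOnActiveProcess */
    return dev;
}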
119
+
120
+ /**
121
+ * \brief Register a VdpVideoSurface object
122
+ *
123
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by CUDA.
124
+ * A handle to the registered object is returned as \p resource.
125
+ * The surface's intended usage is specified using \p flags, as follows:
126
+ *
127
+ * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
128
+ * resource will be used. It is therefore assumed that this resource will be
129
+ * read from and written to by CUDA. This is the default value.
130
+ * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
131
+ * will not write to this resource.
132
+ * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
133
+ * CUDA will not read from this resource and will write over the
134
+ * entire contents of the resource, so none of the data previously
135
+ * stored in the resource will be preserved.
136
+ *
137
+ * \param resource - Pointer to the returned object handle
138
+ * \param vdpSurface - VDPAU object to be registered
139
+ * \param flags - Map flags
140
+ *
141
+ * \return
142
+ * ::cudaSuccess,
143
+ * ::cudaErrorInvalidDevice,
144
+ * ::cudaErrorInvalidValue,
145
+ * ::cudaErrorInvalidResourceHandle,
146
+ * ::cudaErrorUnknown
147
+ * \notefnerr
148
+ *
149
+ * \sa
150
+ * ::cudaVDPAUSetVDPAUDevice,
151
+ * ::cudaGraphicsUnregisterResource,
152
+ * ::cudaGraphicsSubResourceGetMappedArray,
153
+ * ::cuGraphicsVDPAURegisterVideoSurface
154
+ */
155
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterVideoSurface(struct cudaGraphicsResource **resource, VdpVideoSurface vdpSurface, unsigned int flags);
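A sketch of registering and mapping a decoded surface; videoSurface is assumed to be a valid VdpVideoSurface produced by the application's decoder.

/* Sketch: expose a VDPAU video surface to CUDA as cudaArray sub-resources. */
static void map_video_surface(VdpVideoSurface videoSurface)
{
    struct cudaGraphicsResource *res = NULL;
    cudaArray_t plane = NULL;

    cudaGraphicsVDPAURegisterVideoSurface(&res, videoSurface,
                                          cudaGraphicsMapFlagsReadOnly);
    cudaGraphicsMapResources(1, &res, 0);
    /* Video surfaces map to several sub-resources; index 0 is the first one. */
    cudaGraphicsSubResourceGetMappedArray(&plane, res, 0, 0);
    /* ... copy or sample the array here ... */
    cudaGraphicsUnmapResources(1, &res, 0);
    cudaGraphicsUnregisterResource(res);
}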
156
+
157
+ /**
158
+ * \brief Register a VdpOutputSurface object
159
+ *
160
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by CUDA.
161
+ * A handle to the registered object is returned as \p resource.
162
+ * The surface's intended usage is specified using \p flags, as follows:
163
+ *
164
+ * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
165
+ * resource will be used. It is therefore assumed that this resource will be
166
+ * read from and written to by CUDA. This is the default value.
167
+ * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
168
+ * will not write to this resource.
169
+ * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
170
+ * CUDA will not read from this resource and will write over the
171
+ * entire contents of the resource, so none of the data previously
172
+ * stored in the resource will be preserved.
173
+ *
174
+ * \param resource - Pointer to the returned object handle
175
+ * \param vdpSurface - VDPAU object to be registered
176
+ * \param flags - Map flags
177
+ *
178
+ * \return
179
+ * ::cudaSuccess,
180
+ * ::cudaErrorInvalidDevice,
181
+ * ::cudaErrorInvalidValue,
182
+ * ::cudaErrorInvalidResourceHandle,
183
+ * ::cudaErrorUnknown
184
+ * \notefnerr
185
+ *
186
+ * \sa
187
+ * ::cudaVDPAUSetVDPAUDevice,
188
+ * ::cudaGraphicsUnregisterResource,
189
+ * ::cudaGraphicsSubResourceGetMappedArray,
190
+ * ::cuGraphicsVDPAURegisterOutputSurface
191
+ */
192
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterOutputSurface(struct cudaGraphicsResource **resource, VdpOutputSurface vdpSurface, unsigned int flags);
193
+
194
+ /** @} */ /* END CUDART_VDPAU */
195
+
196
+ #if defined(__cplusplus)
197
+ }
198
+ #endif /* __cplusplus */
199
+
200
+ #endif /* __CUDA_VDPAU_INTEROP_H__ */
201
+
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h ADDED
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/device_double_functions.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__
65
+ #endif
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h ADDED
@@ -0,0 +1,81 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__DEVICE_TYPES_H__)
51
+ #define __DEVICE_TYPES_H__
52
+
53
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
54
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
55
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__
56
+ #endif
57
+
58
+ #ifndef __DOXYGEN_ONLY__
59
+ #include "crt/host_defines.h"
60
+ #endif
61
+
62
+ /*******************************************************************************
63
+ * *
64
+ * *
65
+ * *
66
+ *******************************************************************************/
67
+
68
+ enum __device_builtin__ cudaRoundMode
69
+ {
70
+ cudaRoundNearest,
71
+ cudaRoundZero,
72
+ cudaRoundPosInf,
73
+ cudaRoundMinInf
74
+ };
75
+
76
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__)
77
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
78
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__
79
+ #endif
80
+
81
+ #endif /* !__DEVICE_TYPES_H__ */
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h ADDED
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/host_config.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
65
+ #endif
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h ADDED
@@ -0,0 +1,103 @@
1
+ /*
2
+ * Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__LIBRARY_TYPES_H__)
51
+ #define __LIBRARY_TYPES_H__
52
+
53
+
54
+
55
+ typedef enum cudaDataType_t
56
+ {
57
+ CUDA_R_16F = 2, /* real as a half */
58
+ CUDA_C_16F = 6, /* complex as a pair of half numbers */
59
+ CUDA_R_16BF = 14, /* real as a nv_bfloat16 */
60
+ CUDA_C_16BF = 15, /* complex as a pair of nv_bfloat16 numbers */
61
+ CUDA_R_32F = 0, /* real as a float */
62
+ CUDA_C_32F = 4, /* complex as a pair of float numbers */
63
+ CUDA_R_64F = 1, /* real as a double */
64
+ CUDA_C_64F = 5, /* complex as a pair of double numbers */
65
+ CUDA_R_4I = 16, /* real as a signed 4-bit int */
66
+ CUDA_C_4I = 17, /* complex as a pair of signed 4-bit int numbers */
67
+ CUDA_R_4U = 18, /* real as an unsigned 4-bit int */
68
+ CUDA_C_4U = 19, /* complex as a pair of unsigned 4-bit int numbers */
69
+ CUDA_R_8I = 3, /* real as a signed 8-bit int */
70
+ CUDA_C_8I = 7, /* complex as a pair of signed 8-bit int numbers */
71
+ CUDA_R_8U = 8, /* real as an unsigned 8-bit int */
72
+ CUDA_C_8U = 9, /* complex as a pair of unsigned 8-bit int numbers */
73
+ CUDA_R_16I = 20, /* real as a signed 16-bit int */
74
+ CUDA_C_16I = 21, /* complex as a pair of signed 16-bit int numbers */
75
+ CUDA_R_16U = 22, /* real as an unsigned 16-bit int */
76
+ CUDA_C_16U = 23, /* complex as a pair of unsigned 16-bit int numbers */
77
+ CUDA_R_32I = 10, /* real as a signed 32-bit int */
78
+ CUDA_C_32I = 11, /* complex as a pair of signed 32-bit int numbers */
79
+ CUDA_R_32U = 12, /* real as an unsigned 32-bit int */
80
+ CUDA_C_32U = 13, /* complex as a pair of unsigned 32-bit int numbers */
81
+ CUDA_R_64I = 24, /* real as a signed 64-bit int */
82
+ CUDA_C_64I = 25, /* complex as a pair of signed 64-bit int numbers */
83
+ CUDA_R_64U = 26, /* real as an unsigned 64-bit int */
84
+ CUDA_C_64U = 27, /* complex as a pair of unsigned 64-bit int numbers */
85
+ CUDA_R_8F_E4M3 = 28, /* real as a nv_fp8_e4m3 */
86
+ CUDA_R_8F_E5M2 = 29, /* real as a nv_fp8_e5m2 */
87
+ } cudaDataType;
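This enum is consumed by libraries such as cuBLAS and cuSPARSE to describe otherwise untyped buffers. Below is a small hypothetical helper (not part of this header) that maps a few of the values to element sizes in bytes.

#include <stddef.h>
#include <library_types.h>

/* Hypothetical helper: bytes per element for a few cudaDataType values. */
static size_t element_size(cudaDataType t)
{
    switch (t) {
    case CUDA_R_16F: case CUDA_R_16BF:  return 2;
    case CUDA_R_32F: case CUDA_R_32I:   return 4;
    case CUDA_R_64F: case CUDA_C_32F:   return 8;   /* double, or pair of floats */
    case CUDA_C_64F:                    return 16;  /* pair of doubles */
    default:                            return 0;   /* extend as needed */
    }
}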
88
+
89
+
90
+ typedef enum libraryPropertyType_t
91
+ {
92
+ MAJOR_VERSION,
93
+ MINOR_VERSION,
94
+ PATCH_LEVEL
95
+ } libraryPropertyType;
96
+
97
+
98
+ #ifndef __cplusplus
99
+ typedef enum cudaDataType_t cudaDataType_t;
100
+ typedef enum libraryPropertyType_t libraryPropertyType_t;
101
+ #endif
102
+
103
+ #endif /* !__LIBRARY_TYPES_H__ */
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h ADDED
@@ -0,0 +1,152 @@
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__MATH_CONSTANTS_H__)
51
+ #define __MATH_CONSTANTS_H__
52
+
53
+ /* single precision constants */
54
+ #define CUDART_INF_F __int_as_float(0x7f800000U)
55
+ #define CUDART_NAN_F __int_as_float(0x7fffffffU)
56
+ #define CUDART_MIN_DENORM_F __int_as_float(0x00000001U)
57
+ #define CUDART_MAX_NORMAL_F __int_as_float(0x7f7fffffU)
58
+ #define CUDART_NEG_ZERO_F __int_as_float(0x80000000U)
59
+ #define CUDART_ZERO_F 0.0F
60
+ #define CUDART_ONE_F 1.0F
61
+ #define CUDART_SQRT_HALF_F 0.707106781F
62
+ #define CUDART_SQRT_HALF_HI_F 0.707106781F
63
+ #define CUDART_SQRT_HALF_LO_F 1.210161749e-08F
64
+ #define CUDART_SQRT_TWO_F 1.414213562F
65
+ #define CUDART_THIRD_F 0.333333333F
66
+ #define CUDART_PIO4_F 0.785398163F
67
+ #define CUDART_PIO2_F 1.570796327F
68
+ #define CUDART_3PIO4_F 2.356194490F
69
+ #define CUDART_2_OVER_PI_F 0.636619772F
70
+ #define CUDART_SQRT_2_OVER_PI_F 0.797884561F
71
+ #define CUDART_PI_F 3.141592654F
72
+ #define CUDART_L2E_F 1.442695041F
73
+ #define CUDART_L2T_F 3.321928094F
74
+ #define CUDART_LG2_F 0.301029996F
75
+ #define CUDART_LGE_F 0.434294482F
76
+ #define CUDART_LN2_F 0.693147181F
77
+ #define CUDART_LNT_F 2.302585093F
78
+ #define CUDART_LNPI_F 1.144729886F
79
+ #define CUDART_TWO_TO_M126_F 1.175494351e-38F
80
+ #define CUDART_TWO_TO_126_F 8.507059173e37F
81
+ #define CUDART_NORM_HUGE_F 3.402823466e38F
82
+ #define CUDART_TWO_TO_23_F 8388608.0F
83
+ #define CUDART_TWO_TO_24_F 16777216.0F
84
+ #define CUDART_TWO_TO_31_F 2147483648.0F
85
+ #define CUDART_TWO_TO_32_F 4294967296.0F
86
+ #define CUDART_REMQUO_BITS_F 3U
87
+ #define CUDART_REMQUO_MASK_F (~((~0U)<<CUDART_REMQUO_BITS_F))
88
+ #define CUDART_TRIG_PLOSS_F 105615.0F
89
+
90
+ /* double precision constants */
91
+ #define CUDART_INF __longlong_as_double(0x7ff0000000000000ULL)
92
+ #define CUDART_NAN __longlong_as_double(0xfff8000000000000ULL)
93
+ #define CUDART_NEG_ZERO __longlong_as_double(0x8000000000000000ULL)
94
+ #define CUDART_MIN_DENORM __longlong_as_double(0x0000000000000001ULL)
95
+ #define CUDART_ZERO 0.0
96
+ #define CUDART_ONE 1.0
97
+ #define CUDART_SQRT_TWO 1.4142135623730951e+0
98
+ #define CUDART_SQRT_HALF 7.0710678118654757e-1
99
+ #define CUDART_SQRT_HALF_HI 7.0710678118654757e-1
100
+ #define CUDART_SQRT_HALF_LO (-4.8336466567264567e-17)
101
+ #define CUDART_THIRD 3.3333333333333333e-1
102
+ #define CUDART_TWOTHIRD 6.6666666666666667e-1
103
+ #define CUDART_PIO4 7.8539816339744828e-1
104
+ #define CUDART_PIO4_HI 7.8539816339744828e-1
105
+ #define CUDART_PIO4_LO 3.0616169978683830e-17
106
+ #define CUDART_PIO2 1.5707963267948966e+0
107
+ #define CUDART_PIO2_HI 1.5707963267948966e+0
108
+ #define CUDART_PIO2_LO 6.1232339957367660e-17
109
+ #define CUDART_3PIO4 2.3561944901923448e+0
110
+ #define CUDART_2_OVER_PI 6.3661977236758138e-1
111
+ #define CUDART_PI 3.1415926535897931e+0
112
+ #define CUDART_PI_HI 3.1415926535897931e+0
113
+ #define CUDART_PI_LO 1.2246467991473532e-16
114
+ #define CUDART_SQRT_2PI 2.5066282746310007e+0
115
+ #define CUDART_SQRT_2PI_HI 2.5066282746310007e+0
116
+ #define CUDART_SQRT_2PI_LO (-1.8328579980459167e-16)
117
+ #define CUDART_SQRT_PIO2 1.2533141373155003e+0
118
+ #define CUDART_SQRT_PIO2_HI 1.2533141373155003e+0
119
+ #define CUDART_SQRT_PIO2_LO (-9.1642899902295834e-17)
120
+ #define CUDART_SQRT_2OPI 7.9788456080286536e-1
121
+ #define CUDART_L2E 1.4426950408889634e+0
122
+ #define CUDART_L2E_HI 1.4426950408889634e+0
123
+ #define CUDART_L2E_LO 2.0355273740931033e-17
124
+ #define CUDART_L2T 3.3219280948873622e+0
125
+ #define CUDART_LG2 3.0102999566398120e-1
126
+ #define CUDART_LG2_HI 3.0102999566398120e-1
127
+ #define CUDART_LG2_LO (-2.8037281277851704e-18)
128
+ #define CUDART_LGE 4.3429448190325182e-1
129
+ #define CUDART_LGE_HI 4.3429448190325182e-1
130
+ #define CUDART_LGE_LO 1.09831965021676510e-17
131
+ #define CUDART_LN2 6.9314718055994529e-1
132
+ #define CUDART_LN2_HI 6.9314718055994529e-1
133
+ #define CUDART_LN2_LO 2.3190468138462996e-17
134
+ #define CUDART_LNT 2.3025850929940459e+0
135
+ #define CUDART_LNT_HI 2.3025850929940459e+0
136
+ #define CUDART_LNT_LO (-2.1707562233822494e-16)
137
+ #define CUDART_LNPI 1.1447298858494002e+0
138
+ #define CUDART_LN2_X_1024 7.0978271289338397e+2
139
+ #define CUDART_LN2_X_1025 7.1047586007394398e+2
140
+ #define CUDART_LN2_X_1075 7.4513321910194122e+2
141
+ #define CUDART_LG2_X_1024 3.0825471555991675e+2
142
+ #define CUDART_LG2_X_1075 3.2360724533877976e+2
143
+ #define CUDART_TWO_TO_23 8388608.0
144
+ #define CUDART_TWO_TO_52 4503599627370496.0
145
+ #define CUDART_TWO_TO_53 9007199254740992.0
146
+ #define CUDART_TWO_TO_54 18014398509481984.0
147
+ #define CUDART_TWO_TO_M54 5.5511151231257827e-17
148
+ #define CUDART_TWO_TO_M1022 2.22507385850720140e-308
149
+ #define CUDART_TRIG_PLOSS 2147483648.0
150
+ #define CUDART_DBL2INT_CVT 6755399441055744.0
151
+
152
+ #endif /* !__MATH_CONSTANTS_H__ */
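These macros are meant for device code (the INF/NAN forms expand to __int_as_float / __longlong_as_double intrinsics). A minimal kernel sketch using the single-precision pi constant; the kernel name and launch configuration are illustrative only.

#include <math_constants.h>

/* Illustrative kernel: convert polar (r, theta in degrees) to cartesian. */
__global__ void polar_to_cartesian(const float *r, const float *theta_deg,
                                   float2 *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float t = theta_deg[i] * (CUDART_PI_F / 180.0f);
        out[i] = make_float2(r[i] * cosf(t), r[i] * sinf(t));
    }
}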
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h ADDED
@@ -0,0 +1,141 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_32_ATOMIC_FUNCTIONS_H__)
51
+ #define __SM_32_ATOMIC_FUNCTIONS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ #if defined(_NVHPC_CUDA)
72
+ #undef __device_builtin__
73
+ #define __device_builtin__ __location__(device) __location__(host)
74
+ #endif /* _NVHPC_CUDA */
75
+
76
+ #ifndef __CUDA_ARCH__
77
+ #define __DEF_IF_HOST { }
78
+ #else /* !__CUDA_ARCH__ */
79
+ #define __DEF_IF_HOST ;
80
+ #endif /* __CUDA_ARCH__ */
81
+
82
+
83
+ #ifdef __CUDA_ARCH__
84
+ extern "C"
85
+ {
86
+ extern __device__ __device_builtin__ long long __illAtomicMin(long long *address, long long val);
87
+ extern __device__ __device_builtin__ long long __illAtomicMax(long long *address, long long val);
88
+ extern __device__ __device_builtin__ long long __llAtomicAnd(long long *address, long long val);
89
+ extern __device__ __device_builtin__ long long __llAtomicOr(long long *address, long long val);
90
+ extern __device__ __device_builtin__ long long __llAtomicXor(long long *address, long long val);
91
+ extern __device__ __device_builtin__ unsigned long long __ullAtomicMin(unsigned long long *address, unsigned long long val);
92
+ extern __device__ __device_builtin__ unsigned long long __ullAtomicMax(unsigned long long *address, unsigned long long val);
93
+ extern __device__ __device_builtin__ unsigned long long __ullAtomicAnd(unsigned long long *address, unsigned long long val);
94
+ extern __device__ __device_builtin__ unsigned long long __ullAtomicOr (unsigned long long *address, unsigned long long val);
95
+ extern __device__ __device_builtin__ unsigned long long __ullAtomicXor(unsigned long long *address, unsigned long long val);
96
+ }
97
+ #endif /* __CUDA_ARCH__ */
98
+
99
+ #if defined(_NVHPC_CUDA)
100
+ #undef __device_builtin__
101
+ #define __device_builtin__
102
+ #endif /* _NVHPC_CUDA */
103
+
104
+ /*******************************************************************************
105
+ * *
106
+ * *
107
+ * *
108
+ *******************************************************************************/
109
+
110
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val) __DEF_IF_HOST
111
+
112
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val) __DEF_IF_HOST
113
+
114
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val) __DEF_IF_HOST
115
+
116
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val) __DEF_IF_HOST
117
+
118
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val) __DEF_IF_HOST
119
+
120
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
121
+
122
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
123
+
124
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
125
+
126
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
127
+
128
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
129
+
130
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
131
+
132
+ #endif /* __cplusplus && __CUDACC__ */
133
+
134
+ #undef __DEF_IF_HOST
135
+ #undef __SM_32_ATOMIC_FUNCTIONS_DECL__
136
+
137
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
138
+ #include "sm_32_atomic_functions.hpp"
139
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
140
+
141
+ #endif /* !__SM_32_ATOMIC_FUNCTIONS_H__ */
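
The declarations above add 64-bit atomicMin/Max/And/Or/Xor overloads for long long and unsigned long long, compiled only for devices of compute capability 3.2 or higher (the __CUDA_ARCH__ >= 320 guard). As a usage illustration only — not part of the header — a minimal kernel using the signed 64-bit overloads might look like the sketch below; the kernel name, buffers and launch configuration are hypothetical.

// Minimal sketch (illustrative, compiled with nvcc as a .cu file).
__global__ void minmax_ll(const long long *in, long long *out_min,
                          long long *out_max, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // Uses the long long overloads declared in sm_32_atomic_functions.h.
        atomicMin(out_min, in[i]);
        atomicMax(out_max, in[i]);
    }
}

// Host side (illustrative): initialize *out_min to LLONG_MAX and
// *out_max to LLONG_MIN before the launch, e.g.
//   minmax_ll<<<(n + 255) / 256, 256>>>(d_in, d_min, d_max, n);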
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp ADDED
@@ -0,0 +1,588 @@
1
+ /*
2
+ * Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_32_INTRINSICS_HPP__)
51
+ #define __SM_32_INTRINSICS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_32_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_32_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ // In here are intrinsics which are built in to the compiler. These may be
72
+ // referenced by intrinsic implementations from this file.
73
+ extern "C"
74
+ {
75
+ // There are no intrinsics built in to the compiler for SM-3.5,
76
+ // all intrinsics are now implemented as inline PTX below.
77
+ }
78
+
79
+ /*******************************************************************************
80
+ * *
81
+ * Below are implementations of SM-3.5 intrinsics which are included as *
82
+ * source (instead of being built in to the compiler) *
83
+ * *
84
+ *******************************************************************************/
85
+
86
+ // LDG is a "load from global via texture path" command which can exhibit higher
87
+ // bandwidth on GK110 than a regular LD.
88
+ // Define a different pointer storage size for 64 and 32 bit
89
+ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
90
+ #define __LDG_PTR "l"
91
+ #else
92
+ #define __LDG_PTR "r"
93
+ #endif
94
+
95
+ /******************************************************************************
96
+ * __ldg *
97
+ ******************************************************************************/
98
+
99
+ // Size of long is architecture and OS specific.
100
+ #if defined(__LP64__) // 64 bits
101
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
102
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
103
+ #else // 32 bits
104
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
105
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
106
+ #endif
107
+
108
+
109
+ __SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
110
+ __SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
111
+ __SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
112
+ __SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
113
+ __SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
114
+ __SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.nc.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
115
+ __SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.nc.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
116
+ __SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.nc.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
117
+ __SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.nc.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
118
+ __SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.nc.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
119
+ __SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.nc.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
120
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.nc.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
121
+
122
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
123
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
124
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
125
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
126
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.nc.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
127
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.nc.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
128
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.nc.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
129
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.nc.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
130
+ __SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.nc.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
131
+ __SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
132
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
133
+
134
+ __SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) { float ret; asm volatile ("ld.global.nc.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
135
+ __SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) { double ret; asm volatile ("ld.global.nc.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
136
+ __SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.nc.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
137
+ __SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.nc.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
138
+ __SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.nc.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
139
+
140
+
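The __ldg overloads above wrap ld.global.nc, the read-only load through the texture path described in the comment at the top of this section. A minimal, purely illustrative kernel — not part of the header — might use it as sketched below; in, out, c and n are hypothetical names.

// Minimal sketch: read-only global loads through __ldg.
__global__ void scale(const float *__restrict__ in, float *out, float c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // The caller must guarantee 'in' is not written while the kernel
        // runs; __ldg only reads through the read-only data cache.
        out[i] = c * __ldg(&in[i]);
    }
}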
141
+ /******************************************************************************
142
+ * __ldcg *
143
+ ******************************************************************************/
144
+
145
+ // Size of long is architecture and OS specific.
146
+ #if defined(__LP64__) // 64 bits
147
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
148
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
149
+ #else // 32 bits
150
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
151
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
152
+ #endif
153
+
154
+
155
+ __SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
156
+ __SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
157
+ __SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
158
+ __SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
159
+ __SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
160
+ __SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cg.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
161
+ __SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cg.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
162
+ __SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cg.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
163
+ __SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cg.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
164
+ __SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cg.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
165
+ __SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cg.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
166
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cg.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
167
+
168
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
169
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
170
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
171
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
172
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cg.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
173
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cg.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
174
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cg.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
175
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cg.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
176
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cg.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
177
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
178
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cg.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
179
+
180
+ __SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) { float ret; asm volatile ("ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
181
+ __SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) { double ret; asm volatile ("ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
182
+ __SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cg.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
183
+ __SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cg.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
184
+ __SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cg.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
185
+
186
+ /******************************************************************************
187
+ * __ldca *
188
+ ******************************************************************************/
189
+
190
+ // Size of long is architecture and OS specific.
191
+ #if defined(__LP64__) // 64 bits
192
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
193
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
194
+ #else // 32 bits
195
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
196
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
197
+ #endif
198
+
199
+
200
+ __SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
201
+ __SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
202
+ __SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
203
+ __SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
204
+ __SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
205
+ __SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.ca.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
206
+ __SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.ca.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
207
+ __SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) { short2 ret; asm volatile ("ld.global.ca.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
208
+ __SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) { short4 ret; asm volatile ("ld.global.ca.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
209
+ __SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) { int2 ret; asm volatile ("ld.global.ca.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
210
+ __SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) { int4 ret; asm volatile ("ld.global.ca.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
211
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.ca.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
212
+
213
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
214
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
215
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
216
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
217
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.ca.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
218
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.ca.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
219
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.ca.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
220
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.ca.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
221
+ __SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.ca.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
222
+ __SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.ca.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
223
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.ca.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
224
+
225
+ __SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) { float ret; asm volatile ("ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
226
+ __SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) { double ret; asm volatile ("ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
227
+ __SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) { float2 ret; asm volatile ("ld.global.ca.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
228
+ __SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) { float4 ret; asm volatile ("ld.global.ca.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
229
+ __SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) { double2 ret; asm volatile ("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
230
+
231
+ /******************************************************************************
232
+ * __ldcs *
233
+ ******************************************************************************/
234
+
235
+ // Size of long is architecture and OS specific.
236
+ #if defined(__LP64__) // 64 bits
237
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
238
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
239
+ #else // 32 bits
240
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
241
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
242
+ #endif
243
+
244
+
245
+ __SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
246
+ __SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
247
+ __SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
248
+ __SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
249
+ __SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
250
+ __SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cs.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
251
+ __SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cs.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
252
+ __SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cs.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
253
+ __SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cs.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
254
+ __SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cs.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
255
+ __SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cs.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
256
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cs.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
257
+
258
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
259
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
260
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
261
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
262
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cs.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
263
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cs.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
264
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cs.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
265
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cs.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
266
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cs.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
267
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cs.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
268
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cs.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
269
+
270
+ __SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) { float ret; asm volatile ("ld.global.cs.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
271
+ __SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) { double ret; asm volatile ("ld.global.cs.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
272
+ __SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cs.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
273
+ __SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cs.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
274
+ __SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
275
+
276
+ /******************************************************************************
277
+ * __ldlu *
278
+ ******************************************************************************/
279
+
280
+ // Size of long is architecture and OS specific.
281
+ #if defined(__LP64__) // 64 bits
282
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
283
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
284
+ #else // 32 bits
285
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
286
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
287
+ #endif
288
+
289
+
290
+ __SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
291
+ __SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
292
+ __SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) { unsigned short ret; asm ("ld.global.lu.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
293
+ __SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) { unsigned int ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
294
+ __SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) { unsigned long long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
295
+ __SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.lu.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
296
+ __SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.lu.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
297
+ __SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) { short2 ret; asm ("ld.global.lu.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
298
+ __SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) { short4 ret; asm ("ld.global.lu.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
299
+ __SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) { int2 ret; asm ("ld.global.lu.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
300
+ __SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) { int4 ret; asm ("ld.global.lu.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
301
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.lu.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
302
+
303
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.lu.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
304
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.lu.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
305
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
306
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
307
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.lu.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
308
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.lu.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
309
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.lu.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
310
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.lu.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
311
+ __SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) { uint2 ret; asm ("ld.global.lu.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
312
+ __SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) { uint4 ret; asm ("ld.global.lu.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
313
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.lu.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
314
+
315
+ __SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) { float ret; asm ("ld.global.lu.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
316
+ __SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) { double ret; asm ("ld.global.lu.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
317
+ __SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) { float2 ret; asm ("ld.global.lu.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
318
+ __SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) { float4 ret; asm ("ld.global.lu.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
319
+ __SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) { double2 ret; asm ("ld.global.lu.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
320
+
321
+ /******************************************************************************
322
+ * __ldcv *
323
+ ******************************************************************************/
324
+
325
+ // Size of long is architecture and OS specific.
326
+ #if defined(__LP64__) // 64 bits
327
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
328
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
329
+ #else // 32 bits
330
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
331
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
332
+ #endif
333
+
334
+
335
+ __SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
336
+ __SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
337
+ __SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) { unsigned short ret; asm ("ld.global.cv.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
338
+ __SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) { unsigned int ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
339
+ __SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) { unsigned long long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
340
+ __SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.cv.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
341
+ __SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.cv.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
342
+ __SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) { short2 ret; asm ("ld.global.cv.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
343
+ __SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) { short4 ret; asm ("ld.global.cv.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
344
+ __SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) { int2 ret; asm ("ld.global.cv.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
345
+ __SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) { int4 ret; asm ("ld.global.cv.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
346
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.cv.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
347
+
348
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.cv.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
349
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.cv.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
350
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
351
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
352
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.cv.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
353
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.cv.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
354
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.cv.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
355
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.cv.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
356
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) { uint2 ret; asm ("ld.global.cv.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
357
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) { uint4 ret; asm ("ld.global.cv.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
358
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.cv.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
359
+
360
+ __SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) { float ret; asm ("ld.global.cv.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
361
+ __SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) { double ret; asm ("ld.global.cv.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
362
+ __SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) { float2 ret; asm ("ld.global.cv.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
363
+ __SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) { float4 ret; asm ("ld.global.cv.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
364
+ __SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) { double2 ret; asm ("ld.global.cv.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
365
+
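The __ldcg, __ldca, __ldcs, __ldlu and __ldcv families above differ only in the PTX cache operator their asm strings emit (ld.global.cg, .ca, .cs, .lu and .cv respectively). As an illustration only — not part of the header — __ldcs marks a load as streaming (evict-first), which suits data that is read exactly once; the kernel and names below are hypothetical.

// Minimal sketch: streaming loads for read-once data.
__global__ void stream_copy(const float4 *src, float4 *dst, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float4 v = __ldcs(&src[i]);  // ld.global.cs: streaming / evict-first load
        dst[i] = v;
    }
}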
366
+ /******************************************************************************
367
+ * __stwb *
368
+ ******************************************************************************/
369
+
370
+ // Size of long is architecture and OS specific.
371
+ #if defined(__LP64__) // 64 bits
372
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
373
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
374
+ #else // 32 bits
375
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
376
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
377
+ #endif
378
+
379
+
380
+ __SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
381
+ __SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
382
+ __SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) { asm ("st.global.wb.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
383
+ __SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
384
+ __SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
385
+ __SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
386
+ __SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
387
+ __SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) { asm ("st.global.wb.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
388
+ __SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) { asm ("st.global.wb.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
389
+ __SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) { asm ("st.global.wb.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
390
+ __SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) { asm ("st.global.wb.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
391
+ __SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) { asm ("st.global.wb.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
392
+
393
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) { asm ("st.global.wb.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
394
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) { asm ("st.global.wb.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
395
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
396
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
397
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
398
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
399
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) { asm ("st.global.wb.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
400
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) { asm ("st.global.wb.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
401
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) { asm ("st.global.wb.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
402
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) { asm ("st.global.wb.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
403
+ __SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wb.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
404
+
405
+ __SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) { asm ("st.global.wb.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
406
+ __SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) { asm ("st.global.wb.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
407
+ __SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) { asm ("st.global.wb.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
408
+ __SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) { asm ("st.global.wb.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
409
+ __SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) { asm ("st.global.wb.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
410
+
411
+ /******************************************************************************
412
+ * __stcg *
413
+ ******************************************************************************/
414
+
415
+ // Size of long is architecture and OS specific.
416
+ #if defined(__LP64__) // 64 bits
417
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
418
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
419
+ #else // 32 bits
420
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
421
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
422
+ #endif
423
+
424
+
425
+ __SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
426
+ __SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
427
+ __SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) { asm ("st.global.cg.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
428
+ __SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
429
+ __SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
430
+ __SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
431
+ __SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
432
+ __SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) { asm ("st.global.cg.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
433
+ __SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) { asm ("st.global.cg.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
434
+ __SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) { asm ("st.global.cg.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
435
+ __SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) { asm ("st.global.cg.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
436
+ __SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) { asm ("st.global.cg.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
437
+
438
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) { asm ("st.global.cg.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
439
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) { asm ("st.global.cg.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
440
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
441
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
442
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
443
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
444
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) { asm ("st.global.cg.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
445
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) { asm ("st.global.cg.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
446
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) { asm ("st.global.cg.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
447
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) { asm ("st.global.cg.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
448
+ __SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cg.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
449
+
450
+ __SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) { asm ("st.global.cg.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
451
+ __SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) { asm ("st.global.cg.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
452
+ __SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) { asm ("st.global.cg.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
453
+ __SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) { asm ("st.global.cg.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
454
+ __SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) { asm ("st.global.cg.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
455
+
456
+ /******************************************************************************
457
+ * __stcs *
458
+ ******************************************************************************/
459
+
460
+ // Size of long is architecture and OS specific.
461
+ #if defined(__LP64__) // 64 bits
462
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
463
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
464
+ #else // 32 bits
465
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
466
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
467
+ #endif
468
+
469
+
470
+ __SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
471
+ __SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
472
+ __SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) { asm ("st.global.cs.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
473
+ __SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
474
+ __SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
475
+ __SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
476
+ __SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
477
+ __SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) { asm ("st.global.cs.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
478
+ __SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) { asm ("st.global.cs.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
479
+ __SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) { asm ("st.global.cs.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
480
+ __SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) { asm ("st.global.cs.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
481
+ __SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) { asm ("st.global.cs.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
482
+
483
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) { asm ("st.global.cs.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
484
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) { asm ("st.global.cs.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
485
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
486
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
487
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
488
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
489
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) { asm ("st.global.cs.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
490
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) { asm ("st.global.cs.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
491
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) { asm ("st.global.cs.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
492
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) { asm ("st.global.cs.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
493
+ __SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cs.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
494
+
495
+ __SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) { asm ("st.global.cs.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
496
+ __SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) { asm ("st.global.cs.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
497
+ __SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) { asm ("st.global.cs.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
498
+ __SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) { asm ("st.global.cs.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
499
+ __SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) { asm ("st.global.cs.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
500
+
501
+ /******************************************************************************
502
+ * __stwt *
503
+ ******************************************************************************/
504
+
505
+ // Size of long is architecture and OS specific.
506
+ #if defined(__LP64__) // 64 bits
507
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
508
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
509
+ #else // 32 bits
510
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
511
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
512
+ #endif
513
+
514
+
515
+ __SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
516
+ __SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
517
+ __SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) { asm ("st.global.wt.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
518
+ __SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
519
+ __SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
520
+ __SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
521
+ __SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
522
+ __SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) { asm ("st.global.wt.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
523
+ __SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) { asm ("st.global.wt.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
524
+ __SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) { asm ("st.global.wt.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
525
+ __SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) { asm ("st.global.wt.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
526
+ __SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) { asm ("st.global.wt.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
527
+
528
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) { asm ("st.global.wt.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
529
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) { asm ("st.global.wt.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
530
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
531
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
532
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
533
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
534
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) { asm ("st.global.wt.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
535
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) { asm ("st.global.wt.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
536
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) { asm ("st.global.wt.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
537
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) { asm ("st.global.wt.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
538
+ __SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wt.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
539
+
540
+ __SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) { asm ("st.global.wt.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
541
+ __SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) { asm ("st.global.wt.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
542
+ __SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) { asm ("st.global.wt.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
543
+ __SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) { asm ("st.global.wt.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
544
+ __SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) { asm ("st.global.wt.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
545
+
546
+ #undef __LDG_PTR
547
+
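The store intrinsics above differ only in the PTX cache operator they emit (.wb write-back, .cg cache-at-L2, .cs evict-first streaming, .wt write-through), as is visible in the inline-assembly strings. A minimal usage sketch, not part of the header, assuming __ldg from earlier in this file and an illustrative kernel name:

// Illustrative sketch: a streaming copy that loads through the read-only
// cache and writes with the cache-streaming store, since the destination
// lines are not expected to be re-read soon.
__global__ void streaming_copy(const float4 *__restrict__ src, float4 *__restrict__ dst, size_t n)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < n) {
        float4 v = __ldg(&src[i]);  // read-only cached load (sm_32+)
        __stcs(&dst[i], v);         // streaming store, evict-first policy
    }
}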
548
+
549
+ // SHF is the "funnel shift" operation - an accelerated left/right shift with carry
550
+ // operating on 64-bit quantities, which are concatenations of two 32-bit registers.
551
+
552
+ // This shifts the 64-bit pair [hi:lo] left by "shift" bits, returning the most significant 32 bits of the result.
553
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift)
554
+ {
555
+ unsigned int ret;
556
+ asm volatile ("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
557
+ return ret;
558
+ }
559
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift)
560
+ {
561
+ unsigned int ret;
562
+ asm volatile ("shf.l.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
563
+ return ret;
564
+ }
565
+
566
+ // This shifts the 64-bit pair [hi:lo] right by "shift" bits, returning the least significant 32 bits of the result.
567
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift)
568
+ {
569
+ unsigned int ret;
570
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
571
+ return ret;
572
+ }
573
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift)
574
+ {
575
+ unsigned int ret;
576
+ asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
577
+ return ret;
578
+ }
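Two common uses of these intrinsics, shown as a hedged sketch rather than as part of the header (the function names are illustrative):

// (1) A 32-bit rotate-left: feed the same word in as both halves.
__device__ unsigned int rotl32(unsigned int x, unsigned int n)
{
    return __funnelshift_l(x, x, n);   // wrap variant: n is taken mod 32
}

// (2) The high 32 bits of the 64-bit pair (hi:lo) shifted left by n (0..31);
//     the low 32 bits of that shift are simply lo << n.
__device__ unsigned int shl64_hi(unsigned int lo, unsigned int hi, unsigned int n)
{
    return __funnelshift_l(lo, hi, n);
}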
579
+
580
+
581
+ #endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
582
+
583
+ #endif /* __cplusplus && __CUDACC__ */
584
+
585
+ #undef __SM_32_INTRINSICS_DECL__
586
+
587
+ #endif /* !__SM_32_INTRINSICS_HPP__ */
588
+
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h ADDED
@@ -0,0 +1,124 @@
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SURFACE_FUNCTIONS_H__)
51
+ #define __SURFACE_FUNCTIONS_H__
52
+
53
+
54
+ #if defined(__cplusplus) && defined(__CUDACC__)
55
+
56
+ /*******************************************************************************
57
+ * *
58
+ * *
59
+ * *
60
+ *******************************************************************************/
61
+
62
+ #include "cuda_runtime_api.h"
63
+ #include "cuda_surface_types.h"
64
+
65
+ #if defined(_WIN32)
66
+ # define __DEPRECATED__ __declspec(deprecated)
67
+ #else
68
+ # define __DEPRECATED__ __attribute__((deprecated))
69
+ #endif
70
+
71
+ template <typename T> struct __nv_surf_trait { typedef void * cast_type; };
72
+
73
+ template<> struct __nv_surf_trait<char> { typedef char * cast_type; };
74
+ template<> struct __nv_surf_trait<signed char> { typedef signed char * cast_type; };
75
+ template<> struct __nv_surf_trait<unsigned char> { typedef unsigned char * cast_type; };
76
+ template<> struct __nv_surf_trait<char1> { typedef char1 * cast_type; };
77
+ template<> struct __nv_surf_trait<uchar1> { typedef uchar1 * cast_type; };
78
+ template<> struct __nv_surf_trait<char2> { typedef char2 * cast_type; };
79
+ template<> struct __nv_surf_trait<uchar2> { typedef uchar2 * cast_type; };
80
+ template<> struct __nv_surf_trait<char4> { typedef char4 * cast_type; };
81
+ template<> struct __nv_surf_trait<uchar4> { typedef uchar4 * cast_type; };
82
+ template<> struct __nv_surf_trait<short> { typedef short * cast_type; };
83
+ template<> struct __nv_surf_trait<unsigned short> { typedef unsigned short * cast_type; };
84
+ template<> struct __nv_surf_trait<short1> { typedef short1 * cast_type; };
85
+ template<> struct __nv_surf_trait<ushort1> { typedef ushort1 * cast_type; };
86
+ template<> struct __nv_surf_trait<short2> { typedef short2 * cast_type; };
87
+ template<> struct __nv_surf_trait<ushort2> { typedef ushort2 * cast_type; };
88
+ template<> struct __nv_surf_trait<short4> { typedef short4 * cast_type; };
89
+ template<> struct __nv_surf_trait<ushort4> { typedef ushort4 * cast_type; };
90
+ template<> struct __nv_surf_trait<int> { typedef int * cast_type; };
91
+ template<> struct __nv_surf_trait<unsigned int> { typedef unsigned int * cast_type; };
92
+ template<> struct __nv_surf_trait<int1> { typedef int1 * cast_type; };
93
+ template<> struct __nv_surf_trait<uint1> { typedef uint1 * cast_type; };
94
+ template<> struct __nv_surf_trait<int2> { typedef int2 * cast_type; };
95
+ template<> struct __nv_surf_trait<uint2> { typedef uint2 * cast_type; };
96
+ template<> struct __nv_surf_trait<int4> { typedef int4 * cast_type; };
97
+ template<> struct __nv_surf_trait<uint4> { typedef uint4 * cast_type; };
98
+ template<> struct __nv_surf_trait<long long> { typedef long long * cast_type; };
99
+ template<> struct __nv_surf_trait<unsigned long long> { typedef unsigned long long * cast_type; };
100
+ template<> struct __nv_surf_trait<longlong1> { typedef longlong1 * cast_type; };
101
+ template<> struct __nv_surf_trait<ulonglong1> { typedef ulonglong1 * cast_type; };
102
+ template<> struct __nv_surf_trait<longlong2> { typedef longlong2 * cast_type; };
103
+ template<> struct __nv_surf_trait<ulonglong2> { typedef ulonglong2 * cast_type; };
104
+ #if !defined(__LP64__)
105
+ template<> struct __nv_surf_trait<long> { typedef int * cast_type; };
106
+ template<> struct __nv_surf_trait<unsigned long> { typedef unsigned int * cast_type; };
107
+ template<> struct __nv_surf_trait<long1> { typedef int1 * cast_type; };
108
+ template<> struct __nv_surf_trait<ulong1> { typedef uint1 * cast_type; };
109
+ template<> struct __nv_surf_trait<long2> { typedef int2 * cast_type; };
110
+ template<> struct __nv_surf_trait<ulong2> { typedef uint2 * cast_type; };
111
+ template<> struct __nv_surf_trait<long4> { typedef uint4 * cast_type; };
112
+ template<> struct __nv_surf_trait<ulong4> { typedef int4 * cast_type; };
113
+ #endif
114
+ template<> struct __nv_surf_trait<float> { typedef float * cast_type; };
115
+ template<> struct __nv_surf_trait<float1> { typedef float1 * cast_type; };
116
+ template<> struct __nv_surf_trait<float2> { typedef float2 * cast_type; };
117
+ template<> struct __nv_surf_trait<float4> { typedef float4 * cast_type; };
118
+
119
+
120
+ #undef __DEPRECATED__
121
+
122
+
123
+ #endif /* __cplusplus && __CUDACC__ */
124
+ #endif /* !__SURFACE_FUNCTIONS_H__ */
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_indirect_functions.h ADDED
@@ -0,0 +1,638 @@
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+
51
+ #ifndef __TEXTURE_INDIRECT_FUNCTIONS_H__
52
+ #define __TEXTURE_INDIRECT_FUNCTIONS_H__
53
+
54
+
55
+ #if defined(__cplusplus) && defined(__CUDACC__)
56
+
57
+
58
+ #include "cuda_runtime_api.h"
59
+
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)
62
+ #define __NV_TEX_SPARSE 1
63
+ #endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
64
+
65
+ template <typename T> struct __nv_itex_trait { };
66
+ template<> struct __nv_itex_trait<char> { typedef void type; };
67
+ template<> struct __nv_itex_trait<signed char> { typedef void type; };
68
+ template<> struct __nv_itex_trait<char1> { typedef void type; };
69
+ template<> struct __nv_itex_trait<char2> { typedef void type; };
70
+ template<> struct __nv_itex_trait<char4> { typedef void type; };
71
+ template<> struct __nv_itex_trait<unsigned char> { typedef void type; };
72
+ template<> struct __nv_itex_trait<uchar1> { typedef void type; };
73
+ template<> struct __nv_itex_trait<uchar2> { typedef void type; };
74
+ template<> struct __nv_itex_trait<uchar4> { typedef void type; };
75
+ template<> struct __nv_itex_trait<short> { typedef void type; };
76
+ template<> struct __nv_itex_trait<short1> { typedef void type; };
77
+ template<> struct __nv_itex_trait<short2> { typedef void type; };
78
+ template<> struct __nv_itex_trait<short4> { typedef void type; };
79
+ template<> struct __nv_itex_trait<unsigned short> { typedef void type; };
80
+ template<> struct __nv_itex_trait<ushort1> { typedef void type; };
81
+ template<> struct __nv_itex_trait<ushort2> { typedef void type; };
82
+ template<> struct __nv_itex_trait<ushort4> { typedef void type; };
83
+ template<> struct __nv_itex_trait<int> { typedef void type; };
84
+ template<> struct __nv_itex_trait<int1> { typedef void type; };
85
+ template<> struct __nv_itex_trait<int2> { typedef void type; };
86
+ template<> struct __nv_itex_trait<int4> { typedef void type; };
87
+ template<> struct __nv_itex_trait<unsigned int> { typedef void type; };
88
+ template<> struct __nv_itex_trait<uint1> { typedef void type; };
89
+ template<> struct __nv_itex_trait<uint2> { typedef void type; };
90
+ template<> struct __nv_itex_trait<uint4> { typedef void type; };
91
+ #if !defined(__LP64__)
92
+ template<> struct __nv_itex_trait<long> { typedef void type; };
93
+ template<> struct __nv_itex_trait<long1> { typedef void type; };
94
+ template<> struct __nv_itex_trait<long2> { typedef void type; };
95
+ template<> struct __nv_itex_trait<long4> { typedef void type; };
96
+ template<> struct __nv_itex_trait<unsigned long> { typedef void type; };
97
+ template<> struct __nv_itex_trait<ulong1> { typedef void type; };
98
+ template<> struct __nv_itex_trait<ulong2> { typedef void type; };
99
+ template<> struct __nv_itex_trait<ulong4> { typedef void type; };
100
+ #endif /* !__LP64__ */
101
+ template<> struct __nv_itex_trait<float> { typedef void type; };
102
+ template<> struct __nv_itex_trait<float1> { typedef void type; };
103
+ template<> struct __nv_itex_trait<float2> { typedef void type; };
104
+ template<> struct __nv_itex_trait<float4> { typedef void type; };
105
+
106
+
107
+
108
+ template <typename T>
109
+ static __device__ typename __nv_itex_trait<T>::type tex1Dfetch(T *ptr, cudaTextureObject_t obj, int x)
110
+ {
111
+ __nv_tex_surf_handler("__itex1Dfetch", ptr, obj, x);
112
+ }
113
+
114
+ template <class T>
115
+ static __device__ T tex1Dfetch(cudaTextureObject_t texObject, int x)
116
+ {
117
+ T ret;
118
+ tex1Dfetch(&ret, texObject, x);
119
+ return ret;
120
+ }
121
+
122
+ template <typename T>
123
+ static __device__ typename __nv_itex_trait<T>::type tex1D(T *ptr, cudaTextureObject_t obj, float x)
124
+ {
125
+ __nv_tex_surf_handler("__itex1D", ptr, obj, x);
126
+ }
127
+
128
+
129
+ template <class T>
130
+ static __device__ T tex1D(cudaTextureObject_t texObject, float x)
131
+ {
132
+ T ret;
133
+ tex1D(&ret, texObject, x);
134
+ return ret;
135
+ }
136
+
137
+
138
+ template <typename T>
139
+ static __device__ typename __nv_itex_trait<T>::type tex2D(T *ptr, cudaTextureObject_t obj, float x, float y)
140
+ {
141
+ __nv_tex_surf_handler("__itex2D", ptr, obj, x, y);
142
+ }
143
+
144
+ template <class T>
145
+ static __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
146
+ {
147
+ T ret;
148
+ tex2D(&ret, texObject, x, y);
149
+ return ret;
150
+ }
151
+
152
+ #if __NV_TEX_SPARSE
153
+ template <typename T>
154
+ static __device__ typename __nv_itex_trait<T>::type tex2D(T *ptr, cudaTextureObject_t obj, float x, float y,
155
+ bool* isResident)
156
+ {
157
+ unsigned char res;
158
+ __nv_tex_surf_handler("__itex2D_sparse", ptr, obj, x, y, &res);
159
+ *isResident = (res != 0);
160
+ }
161
+
162
+ template <class T>
163
+ static __device__ T tex2D(cudaTextureObject_t texObject, float x, float y, bool* isResident)
164
+ {
165
+ T ret;
166
+ tex2D(&ret, texObject, x, y, isResident);
167
+ return ret;
168
+ }
169
+
170
+ #endif /* __NV_TEX_SPARSE */
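A minimal device-side sketch of the object-based tex2D overloads above (the kernel name and the float channel format are assumptions, not part of the header). With __NV_TEX_SPARSE enabled, the same call also accepts a trailing bool* that reports whether the referenced tile was resident, e.g. bool r; float v = tex2D<float>(tex, u, v, &r);.

// Illustrative sketch: sample a float texture object over a width x height grid.
__global__ void sample_kernel(cudaTextureObject_t tex, float *out, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        float u = (x + 0.5f) / width;   // texel centers; assumes normalizedCoords was set in the cudaTextureDesc
        float v = (y + 0.5f) / height;
        out[y * width + x] = tex2D<float>(tex, u, v);
    }
}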
171
+
172
+
173
+ template <typename T>
174
+ static __device__ typename __nv_itex_trait<T>::type tex3D(T *ptr, cudaTextureObject_t obj, float x, float y, float z)
175
+ {
176
+ __nv_tex_surf_handler("__itex3D", ptr, obj, x, y, z);
177
+ }
178
+
179
+ template <class T>
180
+ static __device__ T tex3D(cudaTextureObject_t texObject, float x, float y, float z)
181
+ {
182
+ T ret;
183
+ tex3D(&ret, texObject, x, y, z);
184
+ return ret;
185
+ }
186
+
187
+ #if __NV_TEX_SPARSE
188
+ template <typename T>
189
+ static __device__ typename __nv_itex_trait<T>::type tex3D(T *ptr, cudaTextureObject_t obj, float x, float y, float z,
190
+ bool* isResident)
191
+ {
192
+ unsigned char res;
193
+ __nv_tex_surf_handler("__itex3D_sparse", ptr, obj, x, y, z, &res);
194
+ *isResident = (res != 0);
195
+ }
196
+
197
+ template <class T>
198
+ static __device__ T tex3D(cudaTextureObject_t texObject, float x, float y, float z, bool* isResident)
199
+ {
200
+ T ret;
201
+ tex3D(&ret, texObject, x, y, z, isResident);
202
+ return ret;
203
+ }
204
+ #endif /* __NV_TEX_SPARSE */
205
+
206
+
207
+ template <typename T>
208
+ static __device__ typename __nv_itex_trait<T>::type tex1DLayered(T *ptr, cudaTextureObject_t obj, float x, int layer)
209
+ {
210
+ __nv_tex_surf_handler("__itex1DLayered", ptr, obj, x, layer);
211
+ }
212
+
213
+ template <class T>
214
+ static __device__ T tex1DLayered(cudaTextureObject_t texObject, float x, int layer)
215
+ {
216
+ T ret;
217
+ tex1DLayered(&ret, texObject, x, layer);
218
+ return ret;
219
+ }
220
+
221
+ template <typename T>
222
+ static __device__ typename __nv_itex_trait<T>::type tex2DLayered(T *ptr, cudaTextureObject_t obj, float x, float y, int layer)
223
+ {
224
+ __nv_tex_surf_handler("__itex2DLayered", ptr, obj, x, y, layer);
225
+ }
226
+
227
+ template <class T>
228
+ static __device__ T tex2DLayered(cudaTextureObject_t texObject, float x, float y, int layer)
229
+ {
230
+ T ret;
231
+ tex2DLayered(&ret, texObject, x, y, layer);
232
+ return ret;
233
+ }
234
+
235
+ #if __NV_TEX_SPARSE
236
+ template <typename T>
237
+ static __device__ typename __nv_itex_trait<T>::type tex2DLayered(T *ptr, cudaTextureObject_t obj, float x, float y, int layer, bool* isResident)
238
+ {
239
+ unsigned char res;
240
+ __nv_tex_surf_handler("__itex2DLayered_sparse", ptr, obj, x, y, layer, &res);
241
+ *isResident = (res != 0);
242
+ }
243
+
244
+ template <class T>
245
+ static __device__ T tex2DLayered(cudaTextureObject_t texObject, float x, float y, int layer, bool* isResident)
246
+ {
247
+ T ret;
248
+ tex2DLayered(&ret, texObject, x, y, layer, isResident);
249
+ return ret;
250
+ }
251
+ #endif /* __NV_TEX_SPARSE */
252
+
253
+
254
+ template <typename T>
255
+ static __device__ typename __nv_itex_trait<T>::type texCubemap(T *ptr, cudaTextureObject_t obj, float x, float y, float z)
256
+ {
257
+ __nv_tex_surf_handler("__itexCubemap", ptr, obj, x, y, z);
258
+ }
259
+
260
+
261
+ template <class T>
262
+ static __device__ T texCubemap(cudaTextureObject_t texObject, float x, float y, float z)
263
+ {
264
+ T ret;
265
+ texCubemap(&ret, texObject, x, y, z);
266
+ return ret;
267
+ }
268
+
269
+
270
+ template <typename T>
271
+ static __device__ typename __nv_itex_trait<T>::type texCubemapLayered(T *ptr, cudaTextureObject_t obj, float x, float y, float z, int layer)
272
+ {
273
+ __nv_tex_surf_handler("__itexCubemapLayered", ptr, obj, x, y, z, layer);
274
+ }
275
+
276
+ template <class T>
277
+ static __device__ T texCubemapLayered(cudaTextureObject_t texObject, float x, float y, float z, int layer)
278
+ {
279
+ T ret;
280
+ texCubemapLayered(&ret, texObject, x, y, z, layer);
281
+ return ret;
282
+ }
283
+
284
+ template <typename T>
285
+ static __device__ typename __nv_itex_trait<T>::type tex2Dgather(T *ptr, cudaTextureObject_t obj, float x, float y, int comp = 0)
286
+ {
287
+ __nv_tex_surf_handler("__itex2Dgather", ptr, obj, x, y, comp);
288
+ }
289
+
290
+ template <class T>
291
+ static __device__ T tex2Dgather(cudaTextureObject_t to, float x, float y, int comp = 0)
292
+ {
293
+ T ret;
294
+ tex2Dgather(&ret, to, x, y, comp);
295
+ return ret;
296
+ }
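A hedged sketch of what the gather overloads above return (not part of the header): for a float-channel texture the result is a float4 holding the selected component (comp = 0..3 for x..w) of each of the four texels that bilinear filtering at (x, y) would touch.

// Illustrative sketch: gather component 0 ("x") of the 2x2 texel footprint.
__device__ float4 gather_component0(cudaTextureObject_t tex, float u, float v)
{
    return tex2Dgather<float4>(tex, u, v, 0);
}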
297
+
298
+ #if __NV_TEX_SPARSE
299
+ template <typename T>
300
+ static __device__ typename __nv_itex_trait<T>::type tex2Dgather(T *ptr, cudaTextureObject_t obj, float x, float y, bool* isResident, int comp = 0)
301
+ {
302
+ unsigned char res;
303
+ __nv_tex_surf_handler("__itex2Dgather_sparse", ptr, obj, x, y, comp, &res);
304
+ *isResident = (res != 0);
305
+ }
306
+
307
+ template <class T>
308
+ static __device__ T tex2Dgather(cudaTextureObject_t to, float x, float y, bool* isResident, int comp = 0)
309
+ {
310
+ T ret;
311
+ tex2Dgather(&ret, to, x, y, isResident, comp);
312
+ return ret;
313
+ }
314
+
315
+ #endif /* __NV_TEX_SPARSE */
316
+
317
+ template <typename T>
318
+ static __device__ typename __nv_itex_trait<T>::type tex1DLod(T *ptr, cudaTextureObject_t obj, float x, float level)
319
+ {
320
+ __nv_tex_surf_handler("__itex1DLod", ptr, obj, x, level);
321
+ }
322
+
323
+ template <class T>
324
+ static __device__ T tex1DLod(cudaTextureObject_t texObject, float x, float level)
325
+ {
326
+ T ret;
327
+ tex1DLod(&ret, texObject, x, level);
328
+ return ret;
329
+ }
330
+
331
+
332
+ template <typename T>
333
+ static __device__ typename __nv_itex_trait<T>::type tex2DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float level)
334
+ {
335
+ __nv_tex_surf_handler("__itex2DLod", ptr, obj, x, y, level);
336
+ }
337
+
338
+ template <class T>
339
+ static __device__ T tex2DLod(cudaTextureObject_t texObject, float x, float y, float level)
340
+ {
341
+ T ret;
342
+ tex2DLod(&ret, texObject, x, y, level);
343
+ return ret;
344
+ }
345
+
346
+ #if __NV_TEX_SPARSE
347
+
348
+ template <typename T>
349
+ static __device__ typename __nv_itex_trait<T>::type tex2DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float level, bool* isResident)
350
+ {
351
+ unsigned char res;
352
+ __nv_tex_surf_handler("__itex2DLod_sparse", ptr, obj, x, y, level, &res);
353
+ *isResident = (res != 0);
354
+ }
355
+
356
+ template <class T>
357
+ static __device__ T tex2DLod(cudaTextureObject_t texObject, float x, float y, float level, bool* isResident)
358
+ {
359
+ T ret;
360
+ tex2DLod(&ret, texObject, x, y, level, isResident);
361
+ return ret;
362
+ }
363
+
364
+ #endif /* __NV_TEX_SPARSE */
365
+
366
+
367
+ template <typename T>
368
+ static __device__ typename __nv_itex_trait<T>::type tex3DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float level)
369
+ {
370
+ __nv_tex_surf_handler("__itex3DLod", ptr, obj, x, y, z, level);
371
+ }
372
+
373
+ template <class T>
374
+ static __device__ T tex3DLod(cudaTextureObject_t texObject, float x, float y, float z, float level)
375
+ {
376
+ T ret;
377
+ tex3DLod(&ret, texObject, x, y, z, level);
378
+ return ret;
379
+ }
380
+
381
+ #if __NV_TEX_SPARSE
382
+ template <typename T>
383
+ static __device__ typename __nv_itex_trait<T>::type tex3DLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float level, bool* isResident)
384
+ {
385
+ unsigned char res;
386
+ __nv_tex_surf_handler("__itex3DLod_sparse", ptr, obj, x, y, z, level, &res);
387
+ *isResident = (res != 0);
388
+ }
389
+
390
+ template <class T>
391
+ static __device__ T tex3DLod(cudaTextureObject_t texObject, float x, float y, float z, float level, bool* isResident)
392
+ {
393
+ T ret;
394
+ tex3DLod(&ret, texObject, x, y, z, level, isResident);
395
+ return ret;
396
+ }
397
+
398
+ #endif /* __NV_TEX_SPARSE */
399
+
400
+
401
+ template <typename T>
402
+ static __device__ typename __nv_itex_trait<T>::type tex1DLayeredLod(T *ptr, cudaTextureObject_t obj, float x, int layer, float level)
403
+ {
404
+ __nv_tex_surf_handler("__itex1DLayeredLod", ptr, obj, x, layer, level);
405
+ }
406
+
407
+ template <class T>
408
+ static __device__ T tex1DLayeredLod(cudaTextureObject_t texObject, float x, int layer, float level)
409
+ {
410
+ T ret;
411
+ tex1DLayeredLod(&ret, texObject, x, layer, level);
412
+ return ret;
413
+ }
414
+
415
+
416
+ template <typename T>
417
+ static __device__ typename __nv_itex_trait<T>::type tex2DLayeredLod(T *ptr, cudaTextureObject_t obj, float x, float y, int layer, float level)
418
+ {
419
+ __nv_tex_surf_handler("__itex2DLayeredLod", ptr, obj, x, y, layer, level);
420
+ }
421
+
422
+ template <class T>
423
+ static __device__ T tex2DLayeredLod(cudaTextureObject_t texObject, float x, float y, int layer, float level)
424
+ {
425
+ T ret;
426
+ tex2DLayeredLod(&ret, texObject, x, y, layer, level);
427
+ return ret;
428
+ }
429
+
430
+ #if __NV_TEX_SPARSE
431
+ template <typename T>
432
+ static __device__ typename __nv_itex_trait<T>::type tex2DLayeredLod(T *ptr, cudaTextureObject_t obj, float x, float y, int layer, float level, bool* isResident)
433
+ {
434
+ unsigned char res;
435
+ __nv_tex_surf_handler("__itex2DLayeredLod_sparse", ptr, obj, x, y, layer, level, &res);
436
+ *isResident = (res != 0);
437
+ }
438
+
439
+ template <class T>
440
+ static __device__ T tex2DLayeredLod(cudaTextureObject_t texObject, float x, float y, int layer, float level, bool* isResident)
441
+ {
442
+ T ret;
443
+ tex2DLayeredLod(&ret, texObject, x, y, layer, level, isResident);
444
+ return ret;
445
+ }
446
+ #endif /* __NV_TEX_SPARSE */
447
+
448
+ template <typename T>
449
+ static __device__ typename __nv_itex_trait<T>::type texCubemapLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float level)
450
+ {
451
+ __nv_tex_surf_handler("__itexCubemapLod", ptr, obj, x, y, z, level);
452
+ }
453
+
454
+ template <class T>
455
+ static __device__ T texCubemapLod(cudaTextureObject_t texObject, float x, float y, float z, float level)
456
+ {
457
+ T ret;
458
+ texCubemapLod(&ret, texObject, x, y, z, level);
459
+ return ret;
460
+ }
461
+
462
+
463
+ template <typename T>
464
+ static __device__ typename __nv_itex_trait<T>::type texCubemapGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float4 dPdx, float4 dPdy)
465
+ {
466
+ __nv_tex_surf_handler("__itexCubemapGrad_v2", ptr, obj, x, y, z, &dPdx, &dPdy);
467
+ }
468
+
469
+ template <class T>
470
+ static __device__ T texCubemapGrad(cudaTextureObject_t texObject, float x, float y, float z, float4 dPdx, float4 dPdy)
471
+ {
472
+ T ret;
473
+ texCubemapGrad(&ret, texObject, x, y, z, dPdx, dPdy);
474
+ return ret;
475
+ }
476
+
477
+ template <typename T>
478
+ static __device__ typename __nv_itex_trait<T>::type texCubemapLayeredLod(T *ptr, cudaTextureObject_t obj, float x, float y, float z, int layer, float level)
479
+ {
480
+ __nv_tex_surf_handler("__itexCubemapLayeredLod", ptr, obj, x, y, z, layer, level);
481
+ }
482
+
483
+ template <class T>
484
+ static __device__ T texCubemapLayeredLod(cudaTextureObject_t texObject, float x, float y, float z, int layer, float level)
485
+ {
486
+ T ret;
487
+ texCubemapLayeredLod(&ret, texObject, x, y, z, layer, level);
488
+ return ret;
489
+ }
490
+
491
+ template <typename T>
492
+ static __device__ typename __nv_itex_trait<T>::type tex1DGrad(T *ptr, cudaTextureObject_t obj, float x, float dPdx, float dPdy)
493
+ {
494
+ __nv_tex_surf_handler("__itex1DGrad", ptr, obj, x, dPdx, dPdy);
495
+ }
496
+
497
+ template <class T>
498
+ static __device__ T tex1DGrad(cudaTextureObject_t texObject, float x, float dPdx, float dPdy)
499
+ {
500
+ T ret;
501
+ tex1DGrad(&ret, texObject, x, dPdx, dPdy);
502
+ return ret;
503
+ }
504
+
505
+
506
+ template <typename T>
507
+ static __device__ typename __nv_itex_trait<T>::type tex2DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float2 dPdx, float2 dPdy)
508
+ {
509
+ __nv_tex_surf_handler("__itex2DGrad_v2", ptr, obj, x, y, &dPdx, &dPdy);
510
+ }
511
+
512
+ template <class T>
513
+ static __device__ T tex2DGrad(cudaTextureObject_t texObject, float x, float y, float2 dPdx, float2 dPdy)
514
+ {
515
+ T ret;
516
+ tex2DGrad(&ret, texObject, x, y, dPdx, dPdy);
517
+ return ret;
518
+ }
519
+
520
+ #if __NV_TEX_SPARSE
521
+ template <typename T>
522
+ static __device__ typename __nv_itex_trait<T>::type tex2DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float2 dPdx, float2 dPdy, bool* isResident)
523
+ {
524
+ unsigned char res;
525
+ __nv_tex_surf_handler("__itex2DGrad_sparse", ptr, obj, x, y, &dPdx, &dPdy, &res);
526
+ *isResident = (res != 0);
527
+ }
528
+
529
+ template <class T>
530
+ static __device__ T tex2DGrad(cudaTextureObject_t texObject, float x, float y, float2 dPdx, float2 dPdy, bool* isResident)
531
+ {
532
+ T ret;
533
+ tex2DGrad(&ret, texObject, x, y, dPdx, dPdy, isResident);
534
+ return ret;
535
+ }
536
+ #endif /* __NV_TEX_SPARSE */
537
+
538
+
539
+ template <typename T>
540
+ static __device__ typename __nv_itex_trait<T>::type tex3DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float4 dPdx, float4 dPdy)
541
+ {
542
+ __nv_tex_surf_handler("__itex3DGrad_v2", ptr, obj, x, y, z, &dPdx, &dPdy);
543
+ }
544
+
545
+ template <class T>
546
+ static __device__ T tex3DGrad(cudaTextureObject_t texObject, float x, float y, float z, float4 dPdx, float4 dPdy)
547
+ {
548
+ T ret;
549
+ tex3DGrad(&ret, texObject, x, y, z, dPdx, dPdy);
550
+ return ret;
551
+ }
552
+
553
+ #if __NV_TEX_SPARSE
554
+ template <typename T>
555
+ static __device__ typename __nv_itex_trait<T>::type tex3DGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, float4 dPdx, float4 dPdy, bool* isResident)
556
+ {
557
+ unsigned char res;
558
+ __nv_tex_surf_handler("__itex3DGrad_sparse", ptr, obj, x, y, z, &dPdx, &dPdy, &res);
559
+ *isResident = (res != 0);
560
+ }
561
+
562
+ template <class T>
563
+ static __device__ T tex3DGrad(cudaTextureObject_t texObject, float x, float y, float z, float4 dPdx, float4 dPdy, bool* isResident)
564
+ {
565
+ T ret;
566
+ tex3DGrad(&ret, texObject, x, y, z, dPdx, dPdy, isResident);
567
+ return ret;
568
+ }
569
+
570
+ #endif /* __NV_TEX_SPARSE */
571
+
572
+
573
+ template <typename T>
574
+ static __device__ typename __nv_itex_trait<T>::type tex1DLayeredGrad(T *ptr, cudaTextureObject_t obj, float x, int layer, float dPdx, float dPdy)
575
+ {
576
+ __nv_tex_surf_handler("__itex1DLayeredGrad", ptr, obj, x, layer, dPdx, dPdy);
577
+ }
578
+
579
+ template <class T>
580
+ static __device__ T tex1DLayeredGrad(cudaTextureObject_t texObject, float x, int layer, float dPdx, float dPdy)
581
+ {
582
+ T ret;
583
+ tex1DLayeredGrad(&ret, texObject, x, layer, dPdx, dPdy);
584
+ return ret;
585
+ }
586
+
587
+
588
+ template <typename T>
589
+ static __device__ typename __nv_itex_trait<T>::type tex2DLayeredGrad(T * ptr, cudaTextureObject_t obj, float x, float y, int layer, float2 dPdx, float2 dPdy)
590
+ {
591
+ __nv_tex_surf_handler("__itex2DLayeredGrad_v2", ptr, obj, x, y, layer, &dPdx, &dPdy);
592
+ }
593
+
594
+ template <class T>
595
+ static __device__ T tex2DLayeredGrad(cudaTextureObject_t texObject, float x, float y, int layer, float2 dPdx, float2 dPdy)
596
+ {
597
+ T ret;
598
+ tex2DLayeredGrad(&ret, texObject, x, y, layer, dPdx, dPdy);
599
+ return ret;
600
+ }
601
+
602
+ #if __NV_TEX_SPARSE
603
+ template <typename T>
604
+ static __device__ typename __nv_itex_trait<T>::type tex2DLayeredGrad(T * ptr, cudaTextureObject_t obj, float x, float y, int layer, float2 dPdx, float2 dPdy, bool* isResident)
605
+ {
606
+ unsigned char res;
607
+ __nv_tex_surf_handler("__itex2DLayeredGrad_sparse", ptr, obj, x, y, layer, &dPdx, &dPdy, &res);
608
+ *isResident = (res != 0);
609
+ }
610
+
611
+ template <class T>
612
+ static __device__ T tex2DLayeredGrad(cudaTextureObject_t texObject, float x, float y, int layer, float2 dPdx, float2 dPdy, bool* isResident)
613
+ {
614
+ T ret;
615
+ tex2DLayeredGrad(&ret, texObject, x, y, layer, dPdx, dPdy, isResident);
616
+ return ret;
617
+ }
618
+ #endif /* __NV_TEX_SPARSE */
619
+
620
+
621
+ template <typename T>
622
+ static __device__ typename __nv_itex_trait<T>::type texCubemapLayeredGrad(T *ptr, cudaTextureObject_t obj, float x, float y, float z, int layer, float4 dPdx, float4 dPdy)
623
+ {
624
+ __nv_tex_surf_handler("__itexCubemapLayeredGrad_v2", ptr, obj, x, y, z, layer, &dPdx, &dPdy);
625
+ }
626
+
627
+ template <class T>
628
+ static __device__ T texCubemapLayeredGrad(cudaTextureObject_t texObject, float x, float y, float z, int layer, float4 dPdx, float4 dPdy)
629
+ {
630
+ T ret;
631
+ texCubemapLayeredGrad(&ret, texObject, x, y, z, layer, dPdx, dPdy);
632
+ return ret;
633
+ }
634
+
635
+ #undef __NV_TEX_SPARSE
636
+
637
+ #endif // __cplusplus && __CUDACC__
638
+ #endif // __TEXTURE_INDIRECT_FUNCTIONS_H__
env-llmeval/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h ADDED
@@ -0,0 +1,177 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__TEXTURE_TYPES_H__)
51
+ #define __TEXTURE_TYPES_H__
52
+
53
+ /*******************************************************************************
54
+ * *
55
+ * *
56
+ * *
57
+ *******************************************************************************/
58
+
59
+ #include "driver_types.h"
60
+
61
+ /**
62
+ * \addtogroup CUDART_TYPES
63
+ *
64
+ * @{
65
+ */
66
+
67
+ /*******************************************************************************
68
+ * *
69
+ * *
70
+ * *
71
+ *******************************************************************************/
72
+
73
+ #define cudaTextureType1D 0x01
74
+ #define cudaTextureType2D 0x02
75
+ #define cudaTextureType3D 0x03
76
+ #define cudaTextureTypeCubemap 0x0C
77
+ #define cudaTextureType1DLayered 0xF1
78
+ #define cudaTextureType2DLayered 0xF2
79
+ #define cudaTextureTypeCubemapLayered 0xFC
80
+
81
+ /**
82
+ * CUDA texture address modes
83
+ */
84
+ enum __device_builtin__ cudaTextureAddressMode
85
+ {
86
+ cudaAddressModeWrap = 0, /**< Wrapping address mode */
87
+ cudaAddressModeClamp = 1, /**< Clamp to edge address mode */
88
+ cudaAddressModeMirror = 2, /**< Mirror address mode */
89
+ cudaAddressModeBorder = 3 /**< Border address mode */
90
+ };
91
+
92
+ /**
93
+ * CUDA texture filter modes
94
+ */
95
+ enum __device_builtin__ cudaTextureFilterMode
96
+ {
97
+ cudaFilterModePoint = 0, /**< Point filter mode */
98
+ cudaFilterModeLinear = 1 /**< Linear filter mode */
99
+ };
100
+
101
+ /**
102
+ * CUDA texture read modes
103
+ */
104
+ enum __device_builtin__ cudaTextureReadMode
105
+ {
106
+ cudaReadModeElementType = 0, /**< Read texture as specified element type */
107
+ cudaReadModeNormalizedFloat = 1 /**< Read texture as normalized float */
108
+ };
109
+
110
+ /**
111
+ * CUDA texture descriptor
112
+ */
113
+ struct __device_builtin__ cudaTextureDesc
114
+ {
115
+ /**
116
+ * Texture address mode for up to 3 dimensions
117
+ */
118
+ enum cudaTextureAddressMode addressMode[3];
119
+ /**
120
+ * Texture filter mode
121
+ */
122
+ enum cudaTextureFilterMode filterMode;
123
+ /**
124
+ * Texture read mode
125
+ */
126
+ enum cudaTextureReadMode readMode;
127
+ /**
128
+ * Perform sRGB->linear conversion during texture read
129
+ */
130
+ int sRGB;
131
+ /**
132
+ * Texture Border Color
133
+ */
134
+ float borderColor[4];
135
+ /**
136
+ * Indicates whether texture reads are normalized or not
137
+ */
138
+ int normalizedCoords;
139
+ /**
140
+ * Limit to the anisotropy ratio
141
+ */
142
+ unsigned int maxAnisotropy;
143
+ /**
144
+ * Mipmap filter mode
145
+ */
146
+ enum cudaTextureFilterMode mipmapFilterMode;
147
+ /**
148
+ * Offset applied to the supplied mipmap level
149
+ */
150
+ float mipmapLevelBias;
151
+ /**
152
+ * Lower end of the mipmap level range to clamp access to
153
+ */
154
+ float minMipmapLevelClamp;
155
+ /**
156
+ * Upper end of the mipmap level range to clamp access to
157
+ */
158
+ float maxMipmapLevelClamp;
159
+ /**
160
+ * Disable any trilinear filtering optimizations.
161
+ */
162
+ int disableTrilinearOptimization;
163
+ /**
164
+ * Enable seamless cube map filtering.
165
+ */
166
+ int seamlessCubemap;
167
+ };
168
+
169
+ /**
170
+ * An opaque value that represents a CUDA texture object
171
+ */
172
+ typedef __device_builtin__ unsigned long long cudaTextureObject_t;
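A host-side sketch of how this descriptor is typically filled in and paired with a resource description to create a texture object (the helper name and the linear float buffer are assumptions, not part of the header; error checking is omitted for brevity):

#include <cuda_runtime.h>
#include <cstring>

// Creates a texture object over an existing linear device buffer of n floats.
cudaTextureObject_t make_linear_float_texture(float *d_buf, size_t n)
{
    cudaResourceDesc resDesc;
    std::memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_buf;
    resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = n * sizeof(float);

    cudaTextureDesc texDesc;
    std::memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0]   = cudaAddressModeClamp;   // see cudaTextureAddressMode above
    texDesc.filterMode       = cudaFilterModePoint;    // linear memory supports point sampling only
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;                      // index with tex1Dfetch<float>(obj, i)

    cudaTextureObject_t obj = 0;
    cudaCreateTextureObject(&obj, &resDesc, &texDesc, NULL);  // release later with cudaDestroyTextureObject(obj)
    return obj;
}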
173
+
174
+ /** @} */
175
+ /** @} */ /* END CUDART_TYPES */
176
+
177
+ #endif /* !__TEXTURE_TYPES_H__ */