applied-ai-018 committed
Commit 2fef4a6 (verified) · Parent: e241029

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  3. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py +0 -0
  4. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py +0 -0
  6. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h +588 -0
  7. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h +1690 -0
  8. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h +348 -0
  9. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h +659 -0
  10. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h +96 -0
  11. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h +608 -0
  12. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h +123 -0
  13. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h +78 -0
  14. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h +0 -0
  15. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h +282 -0
  16. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h +90 -0
  17. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h +280 -0
  18. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h +365 -0
  19. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h +109 -0
  20. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp +0 -0
  21. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h +735 -0
  22. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h +642 -0
  23. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h +0 -0
  24. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp +0 -0
  25. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h +367 -0
  26. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp +1546 -0
  27. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h +514 -0
  28. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h +1958 -0
  29. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h +224 -0
  30. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h +373 -0
  31. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h +148 -0
  32. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h +2300 -0
  33. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h +0 -0
  34. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h +76 -0
  35. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h +76 -0
  36. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h +201 -0
  37. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h +57 -0
  38. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h +217 -0
  39. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp +224 -0
  40. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h +65 -0
  41. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h +65 -0
  42. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h +118 -0
  43. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h +81 -0
  44. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h +145 -0
  45. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h +0 -0
  46. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h +65 -0
  47. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h +65 -0
  48. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h +103 -0
  49. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h +152 -0
  50. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h +60 -0
ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27ab1b2364482a2dccb2c681419e049b24b6b5dab31fe0058dc3ebd7c0fadd53
+ size 33555612
ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3558402125a35be5f7b5d60934d25ed4debddf5a910656b549142a877fd513dc
+ size 33555533
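
The two .pt checkpoint shards above are stored through Git LFS, so the diff shows only pointer files: a spec version line, an oid sha256 hash of the real payload, and its size in bytes. As an illustration only (a hypothetical standalone helper, not code from this commit), such a pointer can be parsed with a few lines of C++:

// Minimal Git LFS pointer parser (hypothetical helper, not part of this commit).
// It extracts the three fields shown above: version, oid sha256:<hex>, size <bytes>.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

struct LfsPointer {
    std::string version;   // e.g. https://git-lfs.github.com/spec/v1
    std::string oid;       // hex SHA-256 of the actual payload
    long long size = 0;    // payload size in bytes
};

bool parse_lfs_pointer(const std::string& path, LfsPointer& out) {
    std::ifstream in(path);
    if (!in) return false;
    std::string line;
    while (std::getline(in, line)) {
        std::istringstream fields(line);
        std::string key, value;
        fields >> key >> value;
        if (key == "version")      out.version = value;
        else if (key == "oid")     out.oid = value.substr(value.find(':') + 1);
        else if (key == "size")    out.size = std::stoll(value);
    }
    return !out.oid.empty() && out.size > 0;
}

int main(int argc, char** argv) {
    LfsPointer p;
    if (argc > 1 && parse_lfs_pointer(argv[1], p))
        std::cout << "oid=" << p.oid << " size=" << p.size << " bytes\n";
    return 0;
}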
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes).
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h ADDED
@@ -0,0 +1,588 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CHANNEL_DESCRIPTOR_H__)
51
+ #define __CHANNEL_DESCRIPTOR_H__
52
+
53
+ #if defined(__cplusplus)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #include "cuda_runtime_api.h"
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ /**
70
+ * \addtogroup CUDART_HIGHLEVEL
71
+ *
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief \hl Returns a channel descriptor using the specified format
77
+ *
78
+ * Returns a channel descriptor with format \p f and number of bits of each
79
+ * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is
80
+ * defined as:
81
+ * \code
82
+ struct cudaChannelFormatDesc {
83
+ int x, y, z, w;
84
+ enum cudaChannelFormatKind f;
85
+ };
86
+ * \endcode
87
+ *
88
+ * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,
89
+ * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat,
90
+ * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2,
91
+ * ::cudaChannelFormatKindSignedNormalized8X4,
92
+ * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2,
93
+ * ::cudaChannelFormatKindUnsignedNormalized8X4,
94
+ * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2,
95
+ * ::cudaChannelFormatKindSignedNormalized16X4,
96
+ * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2,
97
+ * ::cudaChannelFormatKindUnsignedNormalized16X4
98
+ * or ::cudaChannelFormatKindNV12.
99
+ *
100
+ * The format is specified by the template specialization.
101
+ *
102
+ * The template function specializes for the following scalar types:
103
+ * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float.
104
+ * The template function specializes for the following vector types:
105
+ * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}.
106
+ * The template function specializes for following cudaChannelFormatKind enum values:
107
+ * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12.
108
+ *
109
+ * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone
110
+ *
111
+ * \return
112
+ * Channel descriptor with format \p f
113
+ *
114
+ * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)",
115
+ * ::cudaGetChannelDesc,
116
+ */
117
+ template<class T> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
118
+ {
119
+ return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
120
+ }
121
+
122
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void)
123
+ {
124
+ int e = (int)sizeof(unsigned short) * 8;
125
+
126
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
127
+ }
128
+
129
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void)
130
+ {
131
+ int e = (int)sizeof(unsigned short) * 8;
132
+
133
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
134
+ }
135
+
136
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void)
137
+ {
138
+ int e = (int)sizeof(unsigned short) * 8;
139
+
140
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
141
+ }
142
+
143
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void)
144
+ {
145
+ int e = (int)sizeof(unsigned short) * 8;
146
+
147
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
148
+ }
149
+
150
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char>(void)
151
+ {
152
+ int e = (int)sizeof(char) * 8;
153
+
154
+ #if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__)
155
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
156
+ #else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
157
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
158
+ #endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
159
+ }
160
+
161
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<signed char>(void)
162
+ {
163
+ int e = (int)sizeof(signed char) * 8;
164
+
165
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
166
+ }
167
+
168
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned char>(void)
169
+ {
170
+ int e = (int)sizeof(unsigned char) * 8;
171
+
172
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
173
+ }
174
+
175
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char1>(void)
176
+ {
177
+ int e = (int)sizeof(signed char) * 8;
178
+
179
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
180
+ }
181
+
182
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar1>(void)
183
+ {
184
+ int e = (int)sizeof(unsigned char) * 8;
185
+
186
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
187
+ }
188
+
189
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char2>(void)
190
+ {
191
+ int e = (int)sizeof(signed char) * 8;
192
+
193
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
194
+ }
195
+
196
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar2>(void)
197
+ {
198
+ int e = (int)sizeof(unsigned char) * 8;
199
+
200
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
201
+ }
202
+
203
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char4>(void)
204
+ {
205
+ int e = (int)sizeof(signed char) * 8;
206
+
207
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
208
+ }
209
+
210
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar4>(void)
211
+ {
212
+ int e = (int)sizeof(unsigned char) * 8;
213
+
214
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
215
+ }
216
+
217
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short>(void)
218
+ {
219
+ int e = (int)sizeof(short) * 8;
220
+
221
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
222
+ }
223
+
224
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned short>(void)
225
+ {
226
+ int e = (int)sizeof(unsigned short) * 8;
227
+
228
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
229
+ }
230
+
231
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short1>(void)
232
+ {
233
+ int e = (int)sizeof(short) * 8;
234
+
235
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
236
+ }
237
+
238
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort1>(void)
239
+ {
240
+ int e = (int)sizeof(unsigned short) * 8;
241
+
242
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
243
+ }
244
+
245
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short2>(void)
246
+ {
247
+ int e = (int)sizeof(short) * 8;
248
+
249
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
250
+ }
251
+
252
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort2>(void)
253
+ {
254
+ int e = (int)sizeof(unsigned short) * 8;
255
+
256
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
257
+ }
258
+
259
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short4>(void)
260
+ {
261
+ int e = (int)sizeof(short) * 8;
262
+
263
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
264
+ }
265
+
266
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort4>(void)
267
+ {
268
+ int e = (int)sizeof(unsigned short) * 8;
269
+
270
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
271
+ }
272
+
273
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int>(void)
274
+ {
275
+ int e = (int)sizeof(int) * 8;
276
+
277
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
278
+ }
279
+
280
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned int>(void)
281
+ {
282
+ int e = (int)sizeof(unsigned int) * 8;
283
+
284
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
285
+ }
286
+
287
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int1>(void)
288
+ {
289
+ int e = (int)sizeof(int) * 8;
290
+
291
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
292
+ }
293
+
294
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint1>(void)
295
+ {
296
+ int e = (int)sizeof(unsigned int) * 8;
297
+
298
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
299
+ }
300
+
301
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int2>(void)
302
+ {
303
+ int e = (int)sizeof(int) * 8;
304
+
305
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
306
+ }
307
+
308
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint2>(void)
309
+ {
310
+ int e = (int)sizeof(unsigned int) * 8;
311
+
312
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
313
+ }
314
+
315
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int4>(void)
316
+ {
317
+ int e = (int)sizeof(int) * 8;
318
+
319
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
320
+ }
321
+
322
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint4>(void)
323
+ {
324
+ int e = (int)sizeof(unsigned int) * 8;
325
+
326
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
327
+ }
328
+
329
+ #if !defined(__LP64__)
330
+
331
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long>(void)
332
+ {
333
+ int e = (int)sizeof(long) * 8;
334
+
335
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
336
+ }
337
+
338
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned long>(void)
339
+ {
340
+ int e = (int)sizeof(unsigned long) * 8;
341
+
342
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
343
+ }
344
+
345
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long1>(void)
346
+ {
347
+ int e = (int)sizeof(long) * 8;
348
+
349
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
350
+ }
351
+
352
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong1>(void)
353
+ {
354
+ int e = (int)sizeof(unsigned long) * 8;
355
+
356
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
357
+ }
358
+
359
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long2>(void)
360
+ {
361
+ int e = (int)sizeof(long) * 8;
362
+
363
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
364
+ }
365
+
366
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong2>(void)
367
+ {
368
+ int e = (int)sizeof(unsigned long) * 8;
369
+
370
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
371
+ }
372
+
373
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long4>(void)
374
+ {
375
+ int e = (int)sizeof(long) * 8;
376
+
377
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
378
+ }
379
+
380
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong4>(void)
381
+ {
382
+ int e = (int)sizeof(unsigned long) * 8;
383
+
384
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
385
+ }
386
+
387
+ #endif /* !__LP64__ */
388
+
389
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float>(void)
390
+ {
391
+ int e = (int)sizeof(float) * 8;
392
+
393
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
394
+ }
395
+
396
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float1>(void)
397
+ {
398
+ int e = (int)sizeof(float) * 8;
399
+
400
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
401
+ }
402
+
403
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float2>(void)
404
+ {
405
+ int e = (int)sizeof(float) * 8;
406
+
407
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
408
+ }
409
+
410
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float4>(void)
411
+ {
412
+ int e = (int)sizeof(float) * 8;
413
+
414
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
415
+ }
416
+
417
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void)
418
+ {
419
+ int e = (int)sizeof(char) * 8;
420
+
421
+ return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12);
422
+ }
423
+
424
+ template<cudaChannelFormatKind> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
425
+ {
426
+ return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
427
+ }
428
+
429
+ /* Signed 8-bit normalized integer formats */
430
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X1>(void)
431
+ {
432
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1);
433
+ }
434
+
435
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X2>(void)
436
+ {
437
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2);
438
+ }
439
+
440
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X4>(void)
441
+ {
442
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4);
443
+ }
444
+
445
+ /* Unsigned 8-bit normalized integer formats */
446
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X1>(void)
447
+ {
448
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized8X1);
449
+ }
450
+
451
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X2>(void)
452
+ {
453
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2);
454
+ }
455
+
456
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X4>(void)
457
+ {
458
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4);
459
+ }
460
+
461
+ /* Signed 16-bit normalized integer formats */
462
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X1>(void)
463
+ {
464
+ return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1);
465
+ }
466
+
467
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X2>(void)
468
+ {
469
+ return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2);
470
+ }
471
+
472
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X4>(void)
473
+ {
474
+ return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4);
475
+ }
476
+
477
+ /* Unsigned 16-bit normalized integer formats */
478
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X1>(void)
479
+ {
480
+ return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1);
481
+ }
482
+
483
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X2>(void)
484
+ {
485
+ return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2);
486
+ }
487
+
488
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X4>(void)
489
+ {
490
+ return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4);
491
+ }
492
+
493
+ /* NV12 format */
494
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindNV12>(void)
495
+ {
496
+ return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12);
497
+ }
498
+
499
+ /* BC1 format */
500
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1>(void)
501
+ {
502
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1);
503
+ }
504
+
505
+ /* BC1sRGB format */
506
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1SRGB>(void)
507
+ {
508
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB);
509
+ }
510
+
511
+ /* BC2 format */
512
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2>(void)
513
+ {
514
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2);
515
+ }
516
+
517
+ /* BC2sRGB format */
518
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2SRGB>(void)
519
+ {
520
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB);
521
+ }
522
+
523
+ /* BC3 format */
524
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3>(void)
525
+ {
526
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3);
527
+ }
528
+
529
+ /* BC3sRGB format */
530
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3SRGB>(void)
531
+ {
532
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB);
533
+ }
534
+
535
+ /* BC4 unsigned format */
536
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed4>(void)
537
+ {
538
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4);
539
+ }
540
+
541
+ /* BC4 signed format */
542
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed4>(void)
543
+ {
544
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4);
545
+ }
546
+
547
+ /* BC5 unsigned format */
548
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed5>(void)
549
+ {
550
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed5);
551
+ }
552
+
553
+ /* BC5 signed format */
554
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed5>(void)
555
+ {
556
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5);
557
+ }
558
+
559
+ /* BC6H unsigned format */
560
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed6H>(void)
561
+ {
562
+ return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H);
563
+ }
564
+
565
+ /* BC6H signed format */
566
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed6H>(void)
567
+ {
568
+ return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H);
569
+ }
570
+
571
+ /* BC7 format */
572
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7>(void)
573
+ {
574
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7);
575
+ }
576
+
577
+ /* BC7sRGB format */
578
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7SRGB>(void)
579
+ {
580
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB);
581
+ }
582
+
583
+ #endif /* __cplusplus */
584
+
585
+ /** @} */
586
+ /** @} */ /* END CUDART_TEXTURE_HL */
587
+
588
+ #endif /* !__CHANNEL_DESCRIPTOR_H__ */
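
All of the specializations above reduce to the low-level cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) overload declared in cuda_runtime_api.h. As a usage illustration only (not part of this commit), a small host program can query the float4 descriptor and pass it to cudaMallocArray, the standard runtime call that consumes a cudaChannelFormatDesc:

// Usage sketch for the cudaCreateChannelDesc<> specializations above
// (assumes a working CUDA toolkit and nvcc; not code from the committed header).
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    // float4 specialization: 32 bits per component, cudaChannelFormatKindFloat.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
    std::printf("x=%d y=%d z=%d w=%d kind=%d\n",
                desc.x, desc.y, desc.z, desc.w, static_cast<int>(desc.f));

    // The descriptor is typically handed to cudaMallocArray when allocating a
    // CUDA array that backs a texture or surface object.
    cudaArray_t array = nullptr;
    cudaError_t err = cudaMallocArray(&array, &desc, 256, 256);
    if (err != cudaSuccess) {
        std::printf("cudaMallocArray failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaFreeArray(array);
    return 0;
}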
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h ADDED
@@ -0,0 +1,1690 @@
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _COOPERATIVE_GROUPS_H_
51
+ #define _COOPERATIVE_GROUPS_H_
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ #include "cooperative_groups/details/info.h"
56
+ #include "cooperative_groups/details/driver_abi.h"
57
+ #include "cooperative_groups/details/helpers.h"
58
+ #include "cooperative_groups/details/memory.h"
59
+
60
+ #if defined(_CG_HAS_STL_ATOMICS)
61
+ #include <cuda/atomic>
62
+ #define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
63
+ #else
64
+ #define _CG_THREAD_SCOPE(scope)
65
+ #endif
66
+
67
+ _CG_BEGIN_NAMESPACE
68
+
69
+ namespace details {
70
+ _CG_CONST_DECL unsigned int coalesced_group_id = 1;
71
+ _CG_CONST_DECL unsigned int multi_grid_group_id = 2;
72
+ _CG_CONST_DECL unsigned int grid_group_id = 3;
73
+ _CG_CONST_DECL unsigned int thread_block_id = 4;
74
+ _CG_CONST_DECL unsigned int multi_tile_group_id = 5;
75
+ _CG_CONST_DECL unsigned int cluster_group_id = 6;
76
+ }
77
+
78
+ /**
79
+ * class thread_group;
80
+ *
81
+ * Generic thread group type, into which all groups are convertible.
82
+ * It acts as a container for all storage necessary for the derived groups,
83
+ * and will dispatch the API calls to the correct derived group. This means
84
+ * that all derived groups must implement the same interface as thread_group.
85
+ */
86
+ class thread_group
87
+ {
88
+ protected:
89
+ struct group_data {
90
+ unsigned int _unused : 1;
91
+ unsigned int type : 7, : 0;
92
+ };
93
+
94
+ struct gg_data {
95
+ details::grid_workspace *gridWs;
96
+ };
97
+
98
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
99
+ struct mg_data {
100
+ unsigned long long _unused : 1;
101
+ unsigned long long type : 7;
102
+ unsigned long long handle : 56;
103
+ const details::multi_grid::multi_grid_functions *functions;
104
+ };
105
+ #endif
106
+
107
+ struct tg_data {
108
+ unsigned int is_tiled : 1;
109
+ unsigned int type : 7;
110
+ unsigned int size : 24;
111
+ // packed to 4b
112
+ unsigned int metaGroupSize : 16;
113
+ unsigned int metaGroupRank : 16;
114
+ // packed to 8b
115
+ unsigned int mask;
116
+ // packed to 12b
117
+ unsigned int _res;
118
+ };
119
+
120
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
121
+ friend class thread_block;
122
+
123
+ union __align__(8) {
124
+ group_data group;
125
+ tg_data coalesced;
126
+ gg_data grid;
127
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
128
+ mg_data multi_grid;
129
+ #endif
130
+ } _data;
131
+
132
+ _CG_QUALIFIER thread_group operator=(const thread_group& src);
133
+
134
+ _CG_QUALIFIER thread_group(unsigned int type) {
135
+ _data.group.type = type;
136
+ _data.group._unused = false;
137
+ }
138
+
139
+ #ifdef _CG_CPP11_FEATURES
140
+ static_assert(sizeof(tg_data) <= 16, "Failed size check");
141
+ static_assert(sizeof(gg_data) <= 16, "Failed size check");
142
+ # ifdef _CG_ABI_EXPERIMENTAL
143
+ static_assert(sizeof(mg_data) <= 16, "Failed size check");
144
+ # endif
145
+ #endif
146
+
147
+ public:
148
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
149
+
150
+ _CG_QUALIFIER unsigned long long size() const;
151
+ _CG_QUALIFIER unsigned long long num_threads() const;
152
+ _CG_QUALIFIER unsigned long long thread_rank() const;
153
+ _CG_QUALIFIER void sync() const;
154
+ _CG_QUALIFIER unsigned int get_type() const {
155
+ return _data.group.type;
156
+ }
157
+
158
+ };
159
+
160
+ template <unsigned int TyId>
161
+ struct thread_group_base : public thread_group {
162
+ _CG_QUALIFIER thread_group_base() : thread_group(TyId) {}
163
+ _CG_STATIC_CONST_DECL unsigned int id = TyId;
164
+ };
165
+
166
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
167
+
168
+ /**
169
+ * class multi_grid_group;
170
+ *
171
+ * Threads within this this group are guaranteed to be co-resident on the
172
+ * same system, on multiple devices within the same launched kernels.
173
+ * To use this group, the kernel must have been launched with
174
+ * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent),
175
+ * and the device must support it (queryable device attribute).
176
+ *
177
+ * Constructed via this_multi_grid();
178
+ */
179
+
180
+
181
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
182
+ class multi_grid_group;
183
+
184
+ // Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls
185
+ template <typename = void>
186
+ __device__ _CG_DEPRECATED multi_grid_group this_multi_grid();
187
+
188
+ class multi_grid_group : public thread_group_base<details::multi_grid_group_id>
189
+ {
190
+ private:
191
+ template <typename = void>
192
+ _CG_QUALIFIER multi_grid_group() {
193
+ _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics();
194
+ _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle();
195
+ }
196
+
197
+ friend multi_grid_group this_multi_grid<void>();
198
+
199
+ public:
200
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
201
+
202
+ _CG_QUALIFIER bool is_valid() const {
203
+ return (_data.multi_grid.handle != 0);
204
+ }
205
+
206
+ _CG_QUALIFIER void sync() const {
207
+ if (!is_valid()) {
208
+ _CG_ABORT();
209
+ }
210
+ _data.multi_grid.functions->sync(_data.multi_grid.handle);
211
+ }
212
+
213
+ _CG_QUALIFIER unsigned long long num_threads() const {
214
+ _CG_ASSERT(is_valid());
215
+ return _data.multi_grid.functions->size(_data.multi_grid.handle);
216
+ }
217
+
218
+ _CG_QUALIFIER unsigned long long size() const {
219
+ return num_threads();
220
+ }
221
+
222
+ _CG_QUALIFIER unsigned long long thread_rank() const {
223
+ _CG_ASSERT(is_valid());
224
+ return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle);
225
+ }
226
+
227
+ _CG_QUALIFIER unsigned int grid_rank() const {
228
+ _CG_ASSERT(is_valid());
229
+ return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle));
230
+ }
231
+
232
+ _CG_QUALIFIER unsigned int num_grids() const {
233
+ _CG_ASSERT(is_valid());
234
+ return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle));
235
+ }
236
+ };
237
+ # else
238
+ class multi_grid_group
239
+ {
240
+ private:
241
+ unsigned long long _handle;
242
+ unsigned int _size;
243
+ unsigned int _rank;
244
+
245
+ friend _CG_QUALIFIER multi_grid_group this_multi_grid();
246
+
247
+ _CG_QUALIFIER multi_grid_group() {
248
+ _handle = details::multi_grid::get_intrinsic_handle();
249
+ _size = details::multi_grid::size(_handle);
250
+ _rank = details::multi_grid::thread_rank(_handle);
251
+ }
252
+
253
+ public:
254
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
255
+
256
+ _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const {
257
+ return (_handle != 0);
258
+ }
259
+
260
+ _CG_QUALIFIER _CG_DEPRECATED void sync() const {
261
+ if (!is_valid()) {
262
+ _CG_ABORT();
263
+ }
264
+ details::multi_grid::sync(_handle);
265
+ }
266
+
267
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const {
268
+ _CG_ASSERT(is_valid());
269
+ return _size;
270
+ }
271
+
272
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const {
273
+ return num_threads();
274
+ }
275
+
276
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const {
277
+ _CG_ASSERT(is_valid());
278
+ return _rank;
279
+ }
280
+
281
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const {
282
+ _CG_ASSERT(is_valid());
283
+ return (details::multi_grid::grid_rank(_handle));
284
+ }
285
+
286
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const {
287
+ _CG_ASSERT(is_valid());
288
+ return (details::multi_grid::num_grids(_handle));
289
+ }
290
+ };
291
+ # endif
292
+
293
+ /**
294
+ * multi_grid_group this_multi_grid()
295
+ *
296
+ * Constructs a multi_grid_group
297
+ */
298
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
299
+ template <typename>
300
+ __device__
301
+ #else
302
+ _CG_QUALIFIER
303
+ # endif
304
+ _CG_DEPRECATED
305
+ multi_grid_group this_multi_grid()
306
+ {
307
+ return multi_grid_group();
308
+ }
309
+ #endif
310
+
311
+ /**
312
+ * class grid_group;
313
+ *
314
+ * Threads within this this group are guaranteed to be co-resident on the
315
+ * same device within the same launched kernel. To use this group, the kernel
316
+ * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
317
+ * and the device must support it (queryable device attribute).
318
+ *
319
+ * Constructed via this_grid();
320
+ */
321
+ class grid_group : public thread_group_base<details::grid_group_id>
322
+ {
323
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
324
+ friend _CG_QUALIFIER grid_group this_grid();
325
+
326
+ private:
327
+ _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
328
+ _data.grid.gridWs = gridWs;
329
+ }
330
+
331
+ public:
332
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
333
+
334
+ _CG_QUALIFIER bool is_valid() const {
335
+ return (_data.grid.gridWs != NULL);
336
+ }
337
+
338
+ _CG_QUALIFIER void sync() const {
339
+ if (!is_valid()) {
340
+ _CG_ABORT();
341
+ }
342
+ details::grid::sync(&_data.grid.gridWs->barrier);
343
+ }
344
+
345
+ _CG_STATIC_QUALIFIER unsigned long long size() {
346
+ return details::grid::size();
347
+ }
348
+
349
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
350
+ return details::grid::thread_rank();
351
+ }
352
+
353
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
354
+ return details::grid::grid_dim();
355
+ }
356
+
357
+ _CG_STATIC_QUALIFIER unsigned long long num_threads() {
358
+ return details::grid::num_threads();
359
+ }
360
+
361
+ _CG_STATIC_QUALIFIER dim3 dim_blocks() {
362
+ return details::grid::dim_blocks();
363
+ }
364
+
365
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
366
+ return details::grid::num_blocks();
367
+ }
368
+
369
+ _CG_STATIC_QUALIFIER dim3 block_index() {
370
+ return details::grid::block_index();
371
+ }
372
+
373
+ _CG_STATIC_QUALIFIER unsigned long long block_rank() {
374
+ return details::grid::block_rank();
375
+ }
376
+
377
+ # if defined(_CG_HAS_CLUSTER_GROUP)
378
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
379
+ return details::grid::dim_clusters();
380
+ }
381
+
382
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
383
+ return details::grid::num_clusters();
384
+ }
385
+
386
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
387
+ return details::grid::cluster_index();
388
+ }
389
+
390
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
391
+ return details::grid::cluster_rank();
392
+ }
393
+ # endif
394
+ };
395
+
396
+ _CG_QUALIFIER grid_group this_grid() {
397
+ // Load a workspace from the driver
398
+ grid_group gg(details::get_grid_workspace());
399
+ #ifdef _CG_DEBUG
400
+ // *all* threads must be available to synchronize
401
+ gg.sync();
402
+ #endif // _CG_DEBUG
403
+ return gg;
404
+ }
405
+
406
+ #if defined(_CG_HAS_CLUSTER_GROUP)
407
+ /**
408
+ * class cluster_group
409
+ *
410
+ * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
411
+ * divided along all dimensions to form groups of blocks, each group of which is
412
+ * a block cluster. Clustered grids are subject to various restrictions and
413
+ * limitations. Primarily, a cluster consists of at most 8 blocks by default
414
+ * (although the user is allowed to opt-in to non-standard sizes,) and clustered
415
+ * grids are subject to additional occupancy limitations due to per-cluster
416
+ * hardware resource consumption. In exchange, a block cluster is guaranteed to
417
+ * be a cooperative group, with access to all cooperative group capabilities, as
418
+ * well as cluster specific capabilities and accelerations. A cluster_group
419
+ * represents a block cluster.
420
+ *
421
+ * Constructed via this_cluster_group();
422
+ */
423
+ class cluster_group : public thread_group_base<details::cluster_group_id>
424
+ {
425
+ // Friends
426
+ friend _CG_QUALIFIER cluster_group this_cluster();
427
+
428
+ // Disable constructor
429
+ _CG_QUALIFIER cluster_group()
430
+ {
431
+ }
432
+
433
+ public:
434
+ //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster)
435
+
436
+ using arrival_token = struct {};
437
+
438
+ // Functionality exposed by the group
439
+ _CG_STATIC_QUALIFIER void sync()
440
+ {
441
+ return details::cluster::sync();
442
+ }
443
+
444
+ _CG_STATIC_QUALIFIER arrival_token barrier_arrive()
445
+ {
446
+ details::cluster::barrier_arrive();
447
+ return arrival_token();
448
+ }
449
+
450
+ _CG_STATIC_QUALIFIER void barrier_wait()
451
+ {
452
+ return details::cluster::barrier_wait();
453
+ }
454
+
455
+ _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&)
456
+ {
457
+ return details::cluster::barrier_wait();
458
+ }
459
+
460
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
461
+ {
462
+ return details::cluster::query_shared_rank(addr);
463
+ }
464
+
465
+ template <typename T>
466
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
467
+ {
468
+ return details::cluster::map_shared_rank(addr, rank);
469
+ }
470
+
471
+ _CG_STATIC_QUALIFIER dim3 block_index()
472
+ {
473
+ return details::cluster::block_index();
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
477
+ {
478
+ return details::cluster::block_rank();
479
+ }
480
+
481
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
482
+ {
483
+ return details::cluster::thread_rank();
484
+ }
485
+
486
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
487
+ {
488
+ return details::cluster::dim_blocks();
489
+ }
490
+
491
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
492
+ {
493
+ return details::cluster::num_blocks();
494
+ }
495
+
496
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
497
+ {
498
+ return details::cluster::dim_threads();
499
+ }
500
+
501
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
502
+ {
503
+ return details::cluster::num_threads();
504
+ }
505
+
506
+ // Legacy aliases
507
+ _CG_STATIC_QUALIFIER unsigned int size()
508
+ {
509
+ return num_threads();
510
+ }
511
+ };
512
+
513
+ /*
514
+ * cluster_group this_cluster()
515
+ *
516
+ * Constructs a cluster_group
517
+ */
518
+ _CG_QUALIFIER cluster_group this_cluster()
519
+ {
520
+ cluster_group cg;
521
+ #ifdef _CG_DEBUG
522
+ cg.sync();
523
+ #endif
524
+ return cg;
525
+ }
526
+ #endif
527
+
528
+ #if defined(_CG_CPP11_FEATURES)
529
+ class thread_block;
530
+ template <unsigned int MaxBlockSize>
531
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
532
+ #endif
533
+
534
+ /**
535
+ * class thread_block
536
+ *
537
+ * Every GPU kernel is executed by a grid of thread blocks, and threads within
538
+ * each block are guaranteed to reside on the same streaming multiprocessor.
539
+ * A thread_block represents a thread block whose dimensions are not known until runtime.
540
+ *
541
+ * Constructed via this_thread_block();
542
+ */
543
+ class thread_block : public thread_group_base<details::thread_block_id>
544
+ {
545
+ // Friends
546
+ friend _CG_QUALIFIER thread_block this_thread_block();
547
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
548
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz);
549
+
550
+ #if defined(_CG_CPP11_FEATURES)
551
+ template <unsigned int MaxBlockSize>
552
+ friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
553
+ template <unsigned int Size>
554
+ friend class __static_size_multi_warp_tile_base;
555
+
556
+ details::multi_warp_scratch* const tile_memory;
557
+
558
+ template <unsigned int MaxBlockSize>
559
+ _CG_QUALIFIER thread_block(block_tile_memory<MaxBlockSize>& scratch) :
560
+ tile_memory(details::get_scratch_ptr(&scratch)) {
561
+ #ifdef _CG_DEBUG
562
+ if (num_threads() > MaxBlockSize) {
563
+ details::abort();
564
+ }
565
+ #endif
566
+ #if !defined(_CG_HAS_RESERVED_SHARED)
567
+ tile_memory->init_barriers(thread_rank());
568
+ sync();
569
+ #endif
570
+ }
571
+ #endif
572
+
573
+ // Disable constructor
574
+ _CG_QUALIFIER thread_block()
575
+ #if defined(_CG_CPP11_FEATURES)
576
+ : tile_memory(details::get_scratch_ptr(NULL))
577
+ #endif
578
+ { }
579
+
580
+ // Internal Use
581
+ _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const {
582
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
583
+
584
+ // Invalid, immediately fail
585
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
586
+ details::abort();
587
+ return (thread_block());
588
+ }
589
+
590
+ unsigned int mask;
591
+ unsigned int base_offset = thread_rank() & (~(tilesz - 1));
592
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
593
+
594
+ mask = (unsigned int)(-1) >> (32 - masklength);
595
+ mask <<= (details::laneid() & ~(tilesz - 1));
596
+ thread_group tile = thread_group(details::coalesced_group_id);
597
+ tile._data.coalesced.mask = mask;
598
+ tile._data.coalesced.size = __popc(mask);
599
+ tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz;
600
+ tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz;
601
+ tile._data.coalesced.is_tiled = true;
602
+ return (tile);
603
+ }
604
+
605
+ public:
606
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id;
607
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
608
+
609
+ _CG_STATIC_QUALIFIER void sync() {
610
+ details::cta::sync();
611
+ }
612
+
613
+ _CG_STATIC_QUALIFIER unsigned int size() {
614
+ return details::cta::size();
615
+ }
616
+
617
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
618
+ return details::cta::thread_rank();
619
+ }
620
+
621
+ // Additional functionality exposed by the group
622
+ _CG_STATIC_QUALIFIER dim3 group_index() {
623
+ return details::cta::group_index();
624
+ }
625
+
626
+ _CG_STATIC_QUALIFIER dim3 thread_index() {
627
+ return details::cta::thread_index();
628
+ }
629
+
630
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
631
+ return details::cta::block_dim();
632
+ }
633
+
634
+ _CG_STATIC_QUALIFIER dim3 dim_threads() {
635
+ return details::cta::dim_threads();
636
+ }
637
+
638
+ _CG_STATIC_QUALIFIER unsigned int num_threads() {
639
+ return details::cta::num_threads();
640
+ }
641
+
642
+ };
643
+
644
+ /**
645
+ * thread_block this_thread_block()
646
+ *
647
+ * Constructs a thread_block group
648
+ */
649
+ _CG_QUALIFIER thread_block this_thread_block()
650
+ {
651
+ return (thread_block());
652
+ }
653
+
654
+ #if defined(_CG_CPP11_FEATURES)
655
+ template <unsigned int MaxBlockSize>
656
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch) {
657
+ return (thread_block(scratch));
658
+ }
659
+ #endif
660
+
661
+ /**
662
+ * class coalesced_group
663
+ *
664
+ * A group representing the current set of converged threads in a warp.
665
+ * The size of the group is not guaranteed and it may return a group of
666
+ * only one thread (itself).
667
+ *
668
+ * This group exposes warp-synchronous builtins.
669
+ * Constructed via coalesced_threads();
670
+ */
671
+ class coalesced_group : public thread_group_base<details::coalesced_group_id>
672
+ {
673
+ private:
674
+ friend _CG_QUALIFIER coalesced_group coalesced_threads();
675
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
676
+ friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz);
677
+ friend class details::_coalesced_group_data_access;
678
+
679
+ _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const {
680
+ unsigned int member_pack = 0;
681
+ unsigned int member_rank = 0;
682
+ for (int bit_idx = 0; bit_idx < 32; bit_idx++) {
683
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
684
+ if (lane_bit) {
685
+ if (laneMask & lane_bit)
686
+ member_pack |= 1 << member_rank;
687
+ member_rank++;
688
+ }
689
+ }
690
+ return (member_pack);
691
+ }
692
+
693
+ // Internal Use
694
+ _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const {
695
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
696
+
697
+ // Invalid, immediately fail
698
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
699
+ details::abort();
700
+ return (coalesced_group(0));
701
+ }
702
+ if (size() <= tilesz) {
703
+ return (*this);
704
+ }
705
+
706
+ if ((_data.coalesced.is_tiled == true) && pow2_tilesz) {
707
+ unsigned int base_offset = (thread_rank() & (~(tilesz - 1)));
708
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
709
+ unsigned int mask = (unsigned int)(-1) >> (32 - masklength);
710
+
711
+ mask <<= (details::laneid() & ~(tilesz - 1));
712
+ coalesced_group coalesced_tile = coalesced_group(mask);
713
+ coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz;
714
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
715
+ coalesced_tile._data.coalesced.is_tiled = true;
716
+ return (coalesced_tile);
717
+ }
718
+ else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) {
719
+ unsigned int mask = 0;
720
+ unsigned int member_rank = 0;
721
+ int seen_lanes = (thread_rank() / tilesz) * tilesz;
722
+ for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) {
723
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
724
+ if (lane_bit) {
725
+ if (seen_lanes <= 0 && member_rank < tilesz) {
726
+ mask |= lane_bit;
727
+ member_rank++;
728
+ }
729
+ seen_lanes--;
730
+ }
731
+ }
732
+ coalesced_group coalesced_tile = coalesced_group(mask);
733
+ // Override parent with the size of this group
734
+ coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz;
735
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
736
+ return coalesced_tile;
737
+ }
738
+ else {
739
+ // None in _CG_VERSION 1000
740
+ details::abort();
741
+ }
742
+
743
+ return (coalesced_group(0));
744
+ }
745
+
746
+ protected:
747
+ _CG_QUALIFIER coalesced_group(unsigned int mask) {
748
+ _data.coalesced.mask = mask;
749
+ _data.coalesced.size = __popc(mask);
750
+ _data.coalesced.metaGroupRank = 0;
751
+ _data.coalesced.metaGroupSize = 1;
752
+ _data.coalesced.is_tiled = false;
753
+ }
754
+
755
+ _CG_QUALIFIER unsigned int get_mask() const {
756
+ return (_data.coalesced.mask);
757
+ }
758
+
759
+ public:
760
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
761
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
762
+
763
+ _CG_QUALIFIER unsigned int num_threads() const {
764
+ return _data.coalesced.size;
765
+ }
766
+
767
+ _CG_QUALIFIER unsigned int size() const {
768
+ return num_threads();
769
+ }
770
+
771
+ _CG_QUALIFIER unsigned int thread_rank() const {
772
+ return (__popc(_data.coalesced.mask & details::lanemask32_lt()));
773
+ }
774
+
775
+ // Rank of this group in the upper level of the hierarchy
776
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
777
+ return _data.coalesced.metaGroupRank;
778
+ }
779
+
780
+ // Total num partitions created out of all CTAs when the group was created
781
+ _CG_QUALIFIER unsigned int meta_group_size() const {
782
+ return _data.coalesced.metaGroupSize;
783
+ }
784
+
785
+ _CG_QUALIFIER void sync() const {
786
+ __syncwarp(_data.coalesced.mask);
787
+ }
788
+
789
+ #ifdef _CG_CPP11_FEATURES
790
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
791
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
792
+ unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 :
793
+ (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1));
794
+
795
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
796
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
797
+ }
798
+
799
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
800
+ _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
801
+ if (size() == 32) {
802
+ return details::tile::shuffle_dispatch<TyElem>::shfl_down(
803
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
804
+ }
805
+
806
+ unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
807
+
808
+ if (lane >= 32)
809
+ lane = details::laneid();
810
+
811
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
812
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
813
+ }
814
+
815
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
816
+ _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const {
817
+ if (size() == 32) {
818
+ return details::tile::shuffle_dispatch<TyElem>::shfl_up(
819
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
820
+ }
821
+
822
+ unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
823
+ if (lane >= 32)
824
+ lane = details::laneid();
825
+
826
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
827
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
828
+ }
829
+ #else
830
+ template <typename TyIntegral>
831
+ _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const {
832
+ details::assert_if_not_arithmetic<TyIntegral>();
833
+ unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 :
834
+ (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1));
835
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
836
+ }
837
+
838
+ template <typename TyIntegral>
839
+ _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const {
840
+ details::assert_if_not_arithmetic<TyIntegral>();
841
+ if (size() == 32) {
842
+ return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32));
843
+ }
844
+ unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
845
+ if (lane >= 32) lane = details::laneid();
846
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
847
+ }
848
+
849
+ template <typename TyIntegral>
850
+ _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const {
851
+ details::assert_if_not_arithmetic<TyIntegral>();
852
+ if (size() == 32) {
853
+ return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32));
854
+ }
855
+ unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
856
+ if (lane >= 32) lane = details::laneid();
857
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
858
+ }
859
+ #endif
860
+
861
+ _CG_QUALIFIER int any(int predicate) const {
862
+ return (__ballot_sync(_data.coalesced.mask, predicate) != 0);
863
+ }
864
+ _CG_QUALIFIER int all(int predicate) const {
865
+ return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask);
866
+ }
867
+ _CG_QUALIFIER unsigned int ballot(int predicate) const {
868
+ if (size() == 32) {
869
+ return (__ballot_sync(0xFFFFFFFF, predicate));
870
+ }
871
+ unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate);
872
+ return (_packLanes(lane_ballot));
873
+ }
874
+
875
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
876
+
877
+ template <typename TyIntegral>
878
+ _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
879
+ details::assert_if_not_arithmetic<TyIntegral>();
880
+ if (size() == 32) {
881
+ return (__match_any_sync(0xFFFFFFFF, val));
882
+ }
883
+ unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val);
884
+ return (_packLanes(lane_match));
885
+ }
886
+
887
+ template <typename TyIntegral>
888
+ _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
889
+ details::assert_if_not_arithmetic<TyIntegral>();
890
+ if (size() == 32) {
891
+ return (__match_all_sync(0xFFFFFFFF, val, &pred));
892
+ }
893
+ unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred);
894
+ return (_packLanes(lane_match));
895
+ }
896
+
897
+ #endif /* !_CG_HAS_MATCH_COLLECTIVE */
898
+
899
+ };
900
+
901
+ _CG_QUALIFIER coalesced_group coalesced_threads()
902
+ {
903
+ return (coalesced_group(__activemask()));
904
+ }
905
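A minimal usage sketch (not part of this header) of coalesced_threads(): inside a divergent branch it returns the currently converged threads, so one member can be elected to perform a single atomicAdd on behalf of the whole group and broadcast the reserved offset with shfl(). The kernel and buffer names below are illustrative only.

__global__ void compact_positive(const int* in, int* out, int* counter, int n)
{
    namespace cg = cooperative_groups;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] > 0) {
        // Only the threads that took this branch are members of 'active'.
        cg::coalesced_group active = cg::coalesced_threads();
        int base = 0;
        if (active.thread_rank() == 0)
            base = atomicAdd(counter, (int)active.size()); // one reservation per converged group
        base = active.shfl(base, 0);                       // broadcast from the leader (rank 0)
        out[base + active.thread_rank()] = in[i];          // each member fills its own slot
    }
}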
+
906
+ namespace details {
907
+ template <unsigned int Size> struct verify_thread_block_tile_size;
908
+ template <> struct verify_thread_block_tile_size<32> { typedef void OK; };
909
+ template <> struct verify_thread_block_tile_size<16> { typedef void OK; };
910
+ template <> struct verify_thread_block_tile_size<8> { typedef void OK; };
911
+ template <> struct verify_thread_block_tile_size<4> { typedef void OK; };
912
+ template <> struct verify_thread_block_tile_size<2> { typedef void OK; };
913
+ template <> struct verify_thread_block_tile_size<1> { typedef void OK; };
914
+
915
+ #ifdef _CG_CPP11_FEATURES
916
+ template <unsigned int Size>
917
+ using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant<bool, (Size & (Size - 1)) == 0>;
918
+
919
+ template <unsigned int Size>
920
+ using _is_single_warp = _CG_STL_NAMESPACE::integral_constant<bool, Size <= 32>;
921
+ template <unsigned int Size>
922
+ using _is_multi_warp =
923
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size > 32) && (Size <= 1024)>;
924
+
925
+ template <unsigned int Size>
926
+ using _is_valid_single_warp_tile =
927
+ _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_single_warp<Size>::value>;
928
+ template <unsigned int Size>
929
+ using _is_valid_multi_warp_tile =
930
+ _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_multi_warp<Size>::value>;
931
+ #else
932
+ template <unsigned int Size>
933
+ struct _is_multi_warp {
934
+ static const bool value = false;
935
+ };
936
+ #endif
937
+ }
938
+
939
+ template <unsigned int Size>
940
+ class __static_size_tile_base
941
+ {
942
+ protected:
943
+ _CG_STATIC_CONST_DECL unsigned int numThreads = Size;
944
+
945
+ public:
946
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
947
+
948
+ // Rank of thread within tile
949
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
950
+ return (details::cta::thread_rank() & (numThreads - 1));
951
+ }
952
+
953
+ // Number of threads within tile
954
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() {
955
+ return numThreads;
956
+ }
957
+
958
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() {
959
+ return num_threads();
960
+ }
961
+ };
962
+
963
+ template <unsigned int Size>
964
+ class __static_size_thread_block_tile_base : public __static_size_tile_base<Size>
965
+ {
966
+ friend class details::_coalesced_group_data_access;
967
+ typedef details::tile::tile_helpers<Size> th;
968
+
969
+ #ifdef _CG_CPP11_FEATURES
970
+ static_assert(details::_is_valid_single_warp_tile<Size>::value, "Size must be one of 1/2/4/8/16/32");
971
+ #else
972
+ typedef typename details::verify_thread_block_tile_size<Size>::OK valid;
973
+ #endif
974
+ using __static_size_tile_base<Size>::numThreads;
975
+ _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF;
976
+
977
+ protected:
978
+ _CG_STATIC_QUALIFIER unsigned int build_mask() {
979
+ unsigned int mask = fullMask;
980
+ if (numThreads != 32) {
981
+ // [0,31] representing the current active thread in the warp
982
+ unsigned int laneId = details::laneid();
983
+ // shift mask according to the partition it belongs to
984
+ mask = th::tileMask << (laneId & ~(th::laneMask));
985
+ }
986
+ return (mask);
987
+ }
988
+
989
+ public:
990
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
991
+
992
+ _CG_STATIC_QUALIFIER void sync() {
993
+ __syncwarp(build_mask());
994
+ }
995
+
996
+ #ifdef _CG_CPP11_FEATURES
997
+ // PTX supported collectives
998
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
999
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
1000
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
1001
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), srcRank, numThreads);
1002
+ }
1003
+
1004
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1005
+ _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
1006
+ return details::tile::shuffle_dispatch<TyElem>::shfl_down(
1007
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
1008
+ }
1009
+
1010
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1011
+ _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const {
1012
+ return details::tile::shuffle_dispatch<TyElem>::shfl_up(
1013
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
1014
+ }
1015
+
1016
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1017
+ _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const {
1018
+ return details::tile::shuffle_dispatch<TyElem>::shfl_xor(
1019
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), laneMask, numThreads);
1020
+ }
1021
+ #else
1022
+ template <typename TyIntegral>
1023
+ _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const {
1024
+ details::assert_if_not_arithmetic<TyIntegral>();
1025
+ return (__shfl_sync(build_mask(), var, srcRank, numThreads));
1026
+ }
1027
+
1028
+ template <typename TyIntegral>
1029
+ _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const {
1030
+ details::assert_if_not_arithmetic<TyIntegral>();
1031
+ return (__shfl_down_sync(build_mask(), var, delta, numThreads));
1032
+ }
1033
+
1034
+ template <typename TyIntegral>
1035
+ _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const {
1036
+ details::assert_if_not_arithmetic<TyIntegral>();
1037
+ return (__shfl_up_sync(build_mask(), var, delta, numThreads));
1038
+ }
1039
+
1040
+ template <typename TyIntegral>
1041
+ _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const {
1042
+ details::assert_if_not_arithmetic<TyIntegral>();
1043
+ return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads));
1044
+ }
1045
+ #endif //_CG_CPP11_FEATURES
1046
+
1047
+ _CG_QUALIFIER int any(int predicate) const {
1048
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1049
+ return (lane_ballot != 0);
1050
+ }
1051
+ _CG_QUALIFIER int all(int predicate) const {
1052
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1053
+ return (lane_ballot == build_mask());
1054
+ }
1055
+ _CG_QUALIFIER unsigned int ballot(int predicate) const {
1056
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1057
+ return (lane_ballot >> (details::laneid() & (~(th::laneMask))));
1058
+ }
1059
+
1060
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
1061
+ template <typename TyIntegral>
1062
+ _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
1063
+ details::assert_if_not_arithmetic<TyIntegral>();
1064
+ unsigned int lane_match = __match_any_sync(build_mask(), val);
1065
+ return (lane_match >> (details::laneid() & (~(th::laneMask))));
1066
+ }
1067
+
1068
+ template <typename TyIntegral>
1069
+ _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
1070
+ details::assert_if_not_arithmetic<TyIntegral>();
1071
+ unsigned int lane_match = __match_all_sync(build_mask(), val, &pred);
1072
+ return (lane_match >> (details::laneid() & (~(th::laneMask))));
1073
+ }
1074
+ #endif
1075
+
1076
+ };
1077
+
1078
+ template <unsigned int Size, typename ParentT>
1079
+ class __static_parent_thread_block_tile_base
1080
+ {
1081
+ public:
1082
+ // Rank of this group in the upper level of the hierarchy
1083
+ _CG_STATIC_QUALIFIER unsigned int meta_group_rank() {
1084
+ return ParentT::thread_rank() / Size;
1085
+ }
1086
+
1087
+ // Total num partitions created out of all CTAs when the group was created
1088
+ _CG_STATIC_QUALIFIER unsigned int meta_group_size() {
1089
+ return (ParentT::size() + Size - 1) / Size;
1090
+ }
1091
+ };
1092
+
1093
+ /**
1094
+ * class thread_block_tile<unsigned int Size, ParentT = void>
1095
+ *
1096
+ * Statically-sized group type, representing one tile of a thread block.
1097
+ * The only specializations currently supported are those with native
1098
+ * hardware support (1/2/4/8/16/32)
1099
+ *
1100
+ * This group exposes warp-synchronous builtins.
1101
+ * Can only be constructed via tiled_partition<Size>(ParentT&)
1102
+ */
1103
+
1104
+ template <unsigned int Size, typename ParentT = void>
1105
+ class __single_warp_thread_block_tile :
1106
+ public __static_size_thread_block_tile_base<Size>,
1107
+ public __static_parent_thread_block_tile_base<Size, ParentT>
1108
+ {
1109
+ typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
1110
+ friend class details::_coalesced_group_data_access;
1111
+
1112
+ protected:
1113
+ _CG_QUALIFIER __single_warp_thread_block_tile() { };
1114
+ _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { };
1115
+
1116
+ _CG_STATIC_QUALIFIER unsigned int get_mask() {
1117
+ return __static_size_thread_block_tile_base<Size>::build_mask();
1118
+ }
1119
+ };
1120
+
1121
+ template <unsigned int Size>
1122
+ class __single_warp_thread_block_tile<Size, void> :
1123
+ public __static_size_thread_block_tile_base<Size>,
1124
+ public thread_group_base<details::coalesced_group_id>
1125
+ {
1126
+ _CG_STATIC_CONST_DECL unsigned int numThreads = Size;
1127
+
1128
+ template <unsigned int, typename ParentT> friend class __single_warp_thread_block_tile;
1129
+ friend class details::_coalesced_group_data_access;
1130
+
1131
+ typedef __static_size_thread_block_tile_base<numThreads> staticSizeBaseT;
1132
+
1133
+ protected:
1134
+ _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) {
1135
+ _data.coalesced.mask = staticSizeBaseT::build_mask();
1136
+ _data.coalesced.size = numThreads;
1137
+ _data.coalesced.metaGroupRank = meta_group_rank;
1138
+ _data.coalesced.metaGroupSize = meta_group_size;
1139
+ _data.coalesced.is_tiled = true;
1140
+ }
1141
+
1142
+ _CG_QUALIFIER unsigned int get_mask() const {
1143
+ return (_data.coalesced.mask);
1144
+ }
1145
+
1146
+ public:
1147
+ using staticSizeBaseT::sync;
1148
+ using staticSizeBaseT::size;
1149
+ using staticSizeBaseT::num_threads;
1150
+ using staticSizeBaseT::thread_rank;
1151
+
1152
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
1153
+ return _data.coalesced.metaGroupRank;
1154
+ }
1155
+
1156
+ _CG_QUALIFIER unsigned int meta_group_size() const {
1157
+ return _data.coalesced.metaGroupSize;
1158
+ }
1159
+ };
1160
+
1161
+ /**
1162
+ * Outer level API calls
1163
+ * void sync(GroupT) - see <group_type>.sync()
1164
+ * void thread_rank(GroupT) - see <group_type>.thread_rank()
1165
+ * void group_size(GroupT) - see <group_type>.size()
1166
+ */
1167
+ template <class GroupT>
1168
+ _CG_QUALIFIER void sync(GroupT const &g)
1169
+ {
1170
+ g.sync();
1171
+ }
1172
+
1173
+ // TODO: Use a static dispatch to determine appropriate return type
1174
+ // C++03 is stuck with unsigned long long for now
1175
+ #ifdef _CG_CPP11_FEATURES
1176
+ template <class GroupT>
1177
+ _CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) {
1178
+ return g.thread_rank();
1179
+ }
1180
+
1181
+
1182
+ template <class GroupT>
1183
+ _CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) {
1184
+ return g.num_threads();
1185
+ }
1186
+ #else
1187
+ template <class GroupT>
1188
+ _CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) {
1189
+ return static_cast<unsigned long long>(g.thread_rank());
1190
+ }
1191
+
1192
+
1193
+ template <class GroupT>
1194
+ _CG_QUALIFIER unsigned long long group_size(GroupT const &g) {
1195
+ return static_cast<unsigned long long>(g.num_threads());
1196
+ }
1197
+ #endif
1198
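The free functions above allow device code to be written generically over any group type. A short sketch under that assumption (the function name is illustrative only): it strides over an array using the group's rank and size, then synchronizes the whole group.

template <typename GroupT>
__device__ float group_strided_sum(const GroupT& g, const float* data, unsigned int n)
{
    namespace cg = cooperative_groups;
    float partial = 0.0f;
    for (unsigned long long i = cg::thread_rank(g); i < n; i += cg::group_size(g))
        partial += data[i];
    cg::sync(g);   // every member reaches this point before the caller continues
    return partial;
}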
+
1199
+
1200
+ /**
1201
+ * tiled_partition
1202
+ *
1203
+ * The tiled_partition(parent, tilesz) method is a collective operation that
1204
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1205
+ *
1206
+ * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will
1207
+ * be created where threads having identical k = (thread_rank(parent)/tilesz)
1208
+ * will be members of the same subgroup.
1209
+ *
1210
+ * The implementation may cause the calling thread to wait until all the members
1211
+ * of the parent group have invoked the operation before resuming execution.
1212
+ *
1213
+ * Functionality is limited to power-of-two sized subgroup instances of at most
1214
+ * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be
1215
+ * tiled_partition() in _CG_VERSION 1000.
1216
+ */
1217
+ _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz)
1218
+ {
1219
+ if (parent.get_type() == details::coalesced_group_id) {
1220
+ const coalesced_group *_cg = static_cast<const coalesced_group*>(&parent);
1221
+ return _cg->_get_tiled_threads(tilesz);
1222
+ }
1223
+ else {
1224
+ const thread_block *_tb = static_cast<const thread_block*>(&parent);
1225
+ return _tb->_get_tiled_threads(tilesz);
1226
+ }
1227
+ }
1228
+
1229
+ // Thread block type overload: returns a basic thread_group for now (may be specialized later)
1230
+ _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz)
1231
+ {
1232
+ return (parent._get_tiled_threads(tilesz));
1233
+ }
1234
+
1235
+ // Coalesced group type overload: retains its ability to stay coalesced
1236
+ _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz)
1237
+ {
1238
+ return (parent._get_tiled_threads(tilesz));
1239
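A minimal sketch (not part of this header) of the run-time-sized overloads above, assuming the block size is a multiple of 16; the kernel and output names are illustrative only. The returned generic thread_group supports sync(), thread_rank() and size().

__global__ void dynamic_tiles(unsigned int* tile_sizes)
{
    namespace cg = cooperative_groups;
    cg::thread_block block = cg::this_thread_block();

    // Every 16 consecutive threads of the block form one generic thread_group.
    cg::thread_group tile = cg::tiled_partition(block, 16);
    tile.sync();

    if (tile.thread_rank() == 0)
        tile_sizes[blockIdx.x * (blockDim.x / 16) + block.thread_rank() / 16] =
            (unsigned int)tile.size();
}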
+ }
1240
+
1241
+ namespace details {
1242
+ template <unsigned int Size, typename ParentT>
1243
+ class internal_thread_block_tile : public __single_warp_thread_block_tile<Size, ParentT> {};
1244
+
1245
+ template <unsigned int Size, typename ParentT>
1246
+ _CG_QUALIFIER internal_thread_block_tile<Size, ParentT> tiled_partition_internal() {
1247
+ return internal_thread_block_tile<Size, ParentT>();
1248
+ }
1249
+
1250
+ template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
1251
+ _CG_QUALIFIER TyVal multi_warp_collectives_helper(
1252
+ const GroupT& group,
1253
+ WarpLambda warp_lambda,
1254
+ InterWarpLambda inter_warp_lambda) {
1255
+ return group.template collectives_scheme<TyVal>(warp_lambda, inter_warp_lambda);
1256
+ }
1257
+
1258
+ template <typename T, typename GroupT>
1259
+ _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) {
1260
+ return group.template get_scratch_location<T>(warp_id);
1261
+ }
1262
+
1263
+ template <typename GroupT>
1264
+ _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) {
1265
+ return group.get_sync_location();
1266
+ }
1267
+
1268
+ }
1269
+ /**
1270
+ * tiled_partition<tilesz>
1271
+ *
1272
+ * The tiled_partition<tilesz>(parent) method is a collective operation that
1273
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1274
+ *
1275
+ * A total of (size(parent)/tilesz) subgroups will be created,
1276
+ * therefore the parent group size must be evenly divisible by the tilesz.
1277
+ * The allowed parent groups are thread_block or thread_block_tile<size>.
1278
+ *
1279
+ * The implementation may cause the calling thread to wait until all the members
1280
+ * of the parent group have invoked the operation before resuming execution.
1281
+ *
1282
+ * Functionality is limited to native hardware sizes, 1/2/4/8/16/32.
1283
+ * The size(parent) must be greater than the template Size parameter,
1284
+ * otherwise the results are undefined.
1285
+ */
1286
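A minimal sketch (not part of this header) of the statically sized form, assuming a block size that is a multiple of 32; the kernel name is illustrative only. Because the tile size is a compile-time constant, shfl_down() can be used directly for an in-register reduction, and meta_group_rank()/meta_group_size() locate the tile within its parent block.

__global__ void warp_reduce(const int* in, int* out)
{
    namespace cg = cooperative_groups;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);

    int val = in[blockIdx.x * blockDim.x + block.thread_rank()];

    // Shift-based reduction within the 32-thread tile.
    for (unsigned int offset = warp.size() / 2; offset > 0; offset /= 2)
        val += warp.shfl_down(val, offset);

    if (warp.thread_rank() == 0)
        out[blockIdx.x * warp.meta_group_size() + warp.meta_group_rank()] = val;
}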
+
1287
+ #if defined(_CG_CPP11_FEATURES)
1288
+ template <unsigned int Size>
1289
+ class __static_size_multi_warp_tile_base : public __static_size_tile_base<Size>
1290
+ {
1291
+ static_assert(details::_is_valid_multi_warp_tile<Size>::value, "Size must be one of 64/128/256/512");
1292
+
1293
+ template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
1294
+ friend __device__ TyVal details::multi_warp_collectives_helper(
1295
+ const GroupT& group,
1296
+ WarpLambda warp_lambda,
1297
+ InterWarpLambda inter_warp_lambda);
1298
+ template <typename T, typename GroupT>
1299
+ friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id);
1300
+ template <typename GroupT>
1301
+ friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group);
1302
+ template <unsigned int OtherSize>
1303
+ friend class __static_size_multi_warp_tile_base;
1304
+ using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
1305
+ using ThisType = __static_size_multi_warp_tile_base<Size>;
1306
+ _CG_STATIC_CONST_DECL int numWarps = Size / 32;
1307
+
1308
+ protected:
1309
+ details::multi_warp_scratch* const tile_memory;
1310
+
1311
+ template <typename GroupT>
1312
+ _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) {
1313
+ #if defined(_CG_HAS_RESERVED_SHARED)
1314
+ details::sync_warps_reset(get_sync_location(), details::cta::thread_rank());
1315
+ g.sync();
1316
+ #endif
1317
+ }
1318
+
1319
+
1320
+ private:
1321
+ _CG_QUALIFIER details::barrier_t* get_sync_location() const {
1322
+ // Different group sizes use different barriers; all groups of a given size share one barrier.
1323
+ unsigned int sync_id = details::log2(Size / 64);
1324
+ return &tile_memory->barriers[sync_id];
1325
+ }
1326
+
1327
+ template <typename T>
1328
+ _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const {
1329
+ unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id;
1330
+ return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
1331
+ }
1332
+
1333
+ template <typename T>
1334
+ _CG_QUALIFIER T* get_scratch_location() const {
1335
+ unsigned int scratch_id = details::cta::thread_rank() / 32;
1336
+ return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
1337
+ }
1338
+
1339
+ template <typename TyVal>
1340
+ _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const {
1341
+ unsigned int src_warp = src / 32;
1342
+ auto warp = details::tiled_partition_internal<32, ThisType>();
1343
+ details::barrier_t* sync_location = get_sync_location();
1344
+
1345
+ // Get the warp slot of the source thread's warp.
1346
+ TyVal* warp_scratch_location = get_scratch_location<TyVal>(src_warp);
1347
+
1348
+ if (warp.meta_group_rank() == src_warp) {
1349
+ warp.sync();
1350
+ // Put shuffled value into my warp slot and let my warp arrive at the barrier.
1351
+ if (thread_rank() == src) {
1352
+ *warp_scratch_location = val;
1353
+ }
1354
+ details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps);
1355
+ TyVal result = *warp_scratch_location;
1356
+ details::sync_warps_wait(sync_location, details::cta::thread_rank());
1357
+ return result;
1358
+ }
1359
+ else {
1360
+ // Wait for the source warp to arrive on the barrier.
1361
+ details::sync_warps_wait_for_specific_warp(sync_location,
1362
+ (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp));
1363
+ TyVal result = *warp_scratch_location;
1364
+ details::sync_warps(sync_location, details::cta::thread_rank(), numWarps);
1365
+ return result;
1366
+ }
1367
+ }
1368
+
1369
+ template <typename TyVal, typename WarpLambda, typename InterWarpLambda>
1370
+ _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const {
1371
+ static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
1372
+ "Collectives with tiles larger than 32 threads are limited to types smaller than 8 bytes");
1373
+ auto warp = details::tiled_partition_internal<32, ThisType>();
1374
+ details::barrier_t* sync_location = get_sync_location();
1375
+ TyVal* warp_scratch_location = get_scratch_location<TyVal>();
1376
+
1377
+ warp_lambda(warp, warp_scratch_location);
1378
+
1379
+ if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) {
1380
+ auto subwarp = details::tiled_partition_internal<numWarps, decltype(warp)>();
1381
+ if (subwarp.meta_group_rank() == 0) {
1382
+ TyVal* thread_scratch_location = get_scratch_location<TyVal>(subwarp.thread_rank());
1383
+ inter_warp_lambda(subwarp, thread_scratch_location);
1384
+ }
1385
+ warp.sync();
1386
+ details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps);
1387
+ }
1388
+ TyVal result = *warp_scratch_location;
1389
+ return result;
1390
+ }
1391
+
1392
+ public:
1393
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id;
1394
+
1395
+ using __static_size_tile_base<Size>::thread_rank;
1396
+
1397
+ template <typename TyVal>
1398
+ _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const {
1399
+ static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
1400
+ "Collectives with tiles larger than 32 threads are limited to types smaller than 8 bytes");
1401
+ return shfl_impl(val, src);
1402
+ }
1403
+
1404
+ _CG_QUALIFIER void sync() const {
1405
+ details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps);
1406
+ }
1407
+
1408
+ _CG_QUALIFIER int any(int predicate) const {
1409
+ auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
1410
+ *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate);
1411
+ };
1412
+ auto inter_warp_lambda =
1413
+ [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
1414
+ *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
1415
+ };
1416
+ return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
1417
+ }
1418
+
1419
+ _CG_QUALIFIER int all(int predicate) const {
1420
+ auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
1421
+ *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate);
1422
+ };
1423
+ auto inter_warp_lambda =
1424
+ [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
1425
+ *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
1426
+ };
1427
+ return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
1428
+ }
1429
+ };
1430
+
1431
+
1432
+ template <unsigned int Size, typename ParentT = void>
1433
+ class __multi_warp_thread_block_tile :
1434
+ public __static_size_multi_warp_tile_base<Size>,
1435
+ public __static_parent_thread_block_tile_base<Size, ParentT>
1436
+ {
1437
+ typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
1438
+ typedef __static_size_multi_warp_tile_base<Size> staticTileBaseT;
1439
+ protected:
1440
+ _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) :
1441
+ __static_size_multi_warp_tile_base<Size>(g) {}
1442
+ };
1443
+
1444
+ template <unsigned int Size>
1445
+ class __multi_warp_thread_block_tile<Size, void> : public __static_size_multi_warp_tile_base<Size>
1446
+ {
1447
+ const unsigned int metaGroupRank;
1448
+ const unsigned int metaGroupSize;
1449
+
1450
+ protected:
1451
+ template <unsigned int OtherSize, typename ParentT>
1452
+ _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile<OtherSize, ParentT>& g) :
1453
+ __static_size_multi_warp_tile_base<Size>(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {}
1454
+
1455
+ public:
1456
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
1457
+ return metaGroupRank;
1458
+ }
1459
+
1460
+ _CG_QUALIFIER unsigned int meta_group_size() const {
1461
+ return metaGroupSize;
1462
+ }
1463
+ };
1464
+ #endif
1465
+
1466
+ template <unsigned int Size, typename ParentT = void>
1467
+ class thread_block_tile;
1468
+
1469
+ namespace details {
1470
+ template <unsigned int Size, typename ParentT, bool IsMultiWarp>
1471
+ class thread_block_tile_impl;
1472
+
1473
+ template <unsigned int Size, typename ParentT>
1474
+ class thread_block_tile_impl<Size, ParentT, false>: public __single_warp_thread_block_tile<Size, ParentT>
1475
+ {
1476
+ protected:
1477
+ template <unsigned int OtherSize, typename OtherParentT, bool OtherIsMultiWarp>
1478
+ _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl<OtherSize, OtherParentT, OtherIsMultiWarp>& g) :
1479
+ __single_warp_thread_block_tile<Size, ParentT>(g.meta_group_rank(), g.meta_group_size()) {}
1480
+
1481
+ _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) :
1482
+ __single_warp_thread_block_tile<Size, ParentT>() {}
1483
+ };
1484
+
1485
+ #if defined(_CG_CPP11_FEATURES)
1486
+ template <unsigned int Size, typename ParentT>
1487
+ class thread_block_tile_impl<Size, ParentT, true> : public __multi_warp_thread_block_tile<Size, ParentT>
1488
+ {
1489
+ protected:
1490
+ template <typename GroupT>
1491
+ _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) :
1492
+ __multi_warp_thread_block_tile<Size, ParentT>(g) {}
1493
+ };
1494
+ #else
1495
+ template <unsigned int Size, typename ParentT>
1496
+ class thread_block_tile_impl<Size, ParentT, true>
1497
+ {
1498
+ protected:
1499
+ template <typename GroupT>
1500
+ _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {}
1501
+ };
1502
+ #endif
1503
+ }
1504
+
1505
+ template <unsigned int Size, typename ParentT>
1506
+ class thread_block_tile : public details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>
1507
+ {
1508
+ friend _CG_QUALIFIER thread_block_tile<1, void> this_thread();
1509
+
1510
+ protected:
1511
+ _CG_QUALIFIER thread_block_tile(const ParentT& g) :
1512
+ details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>(g) {}
1513
+
1514
+ public:
1515
+ _CG_QUALIFIER operator thread_block_tile<Size, void>() const {
1516
+ return thread_block_tile<Size, void>(*this);
1517
+ }
1518
+ };
1519
+
1520
+ template <unsigned int Size>
1521
+ class thread_block_tile<Size, void> : public details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>
1522
+ {
1523
+ template <unsigned int, typename ParentT>
1524
+ friend class thread_block_tile;
1525
+
1526
+ protected:
1527
+ template <unsigned int OtherSize, typename OtherParentT>
1528
+ _CG_QUALIFIER thread_block_tile(const thread_block_tile<OtherSize, OtherParentT>& g) :
1529
+ details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
1530
+
1531
+ public:
1532
+ template <typename ParentT>
1533
+ _CG_QUALIFIER thread_block_tile(const thread_block_tile<Size, ParentT>& g) :
1534
+ details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
1535
+ };
1536
+
1537
+ namespace details {
1538
+ template <unsigned int Size, typename ParentT>
1539
+ struct tiled_partition_impl;
1540
+
1541
+ template <unsigned int Size>
1542
+ struct tiled_partition_impl<Size, thread_block> : public thread_block_tile<Size, thread_block> {
1543
+ _CG_QUALIFIER tiled_partition_impl(const thread_block& g) :
1544
+ thread_block_tile<Size, thread_block>(g) {}
1545
+ };
1546
+
1547
+ // ParentT = static thread_block_tile<ParentSize, GrandParent> specialization
1548
+ template <unsigned int Size, unsigned int ParentSize, typename GrandParent>
1549
+ struct tiled_partition_impl<Size, thread_block_tile<ParentSize, GrandParent> > :
1550
+ public thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> > {
1551
+ #ifdef _CG_CPP11_FEATURES
1552
+ static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size");
1553
+ #endif
1554
+ _CG_QUALIFIER tiled_partition_impl(const thread_block_tile<ParentSize, GrandParent>& g) :
1555
+ thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> >(g) {}
1556
+ };
1557
+
1558
+ }
1559
+
1560
+ template <unsigned int Size, typename ParentT>
1561
+ _CG_QUALIFIER thread_block_tile<Size, ParentT> tiled_partition(const ParentT& g)
1562
+ {
1563
+ return details::tiled_partition_impl<Size, ParentT>(g);
1564
+ }
1565
+
1566
+ /**
1567
+ * thread_group this_thread()
1568
+ *
1569
+ * Constructs a generic thread_group containing only the calling thread
1570
+ */
1571
+ _CG_QUALIFIER thread_block_tile<1, void> this_thread()
1572
+ {
1573
+ // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its
1574
+ // meta group rank and size set to 0 and 1 respectively.
1575
+ return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block());
1576
+ }
1577
+
1578
+ /**
1579
+ * <group_type>.sync()
1580
+ *
1581
+ * Executes a barrier across the group
1582
+ *
1583
+ * Implements both a compiler fence and an architectural fence to prevent
1584
+ * memory reordering around the barrier.
1585
+ */
1586
+ _CG_QUALIFIER void thread_group::sync() const
1587
+ {
1588
+ switch (_data.group.type) {
1589
+ case details::coalesced_group_id:
1590
+ cooperative_groups::sync(*static_cast<const coalesced_group*>(this));
1591
+ break;
1592
+ case details::thread_block_id:
1593
+ cooperative_groups::sync(*static_cast<const thread_block*>(this));
1594
+ break;
1595
+ case details::grid_group_id:
1596
+ cooperative_groups::sync(*static_cast<const grid_group*>(this));
1597
+ break;
1598
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1599
+ case details::multi_grid_group_id:
1600
+ cooperative_groups::sync(*static_cast<const multi_grid_group*>(this));
1601
+ break;
1602
+ #endif
1603
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1604
+ case details::cluster_group_id:
1605
+ cooperative_groups::sync(*static_cast<const cluster_group*>(this));
1606
+ break;
1607
+ #endif
1608
+ default:
1609
+ break;
1610
+ }
1611
+ }
1612
+
1613
+ /**
1614
+ * <group_type>.size()
1615
+ *
1616
+ * Returns the total number of threads in the group.
1617
+ */
1618
+ _CG_QUALIFIER unsigned long long thread_group::size() const
1619
+ {
1620
+ unsigned long long size = 0;
1621
+ switch (_data.group.type) {
1622
+ case details::coalesced_group_id:
1623
+ size = cooperative_groups::group_size(*static_cast<const coalesced_group*>(this));
1624
+ break;
1625
+ case details::thread_block_id:
1626
+ size = cooperative_groups::group_size(*static_cast<const thread_block*>(this));
1627
+ break;
1628
+ case details::grid_group_id:
1629
+ size = cooperative_groups::group_size(*static_cast<const grid_group*>(this));
1630
+ break;
1631
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1632
+ case details::multi_grid_group_id:
1633
+ size = cooperative_groups::group_size(*static_cast<const multi_grid_group*>(this));
1634
+ break;
1635
+ #endif
1636
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1637
+ case details::cluster_group_id:
1638
+ size = cooperative_groups::group_size(*static_cast<const cluster_group*>(this));
1639
+ break;
1640
+ #endif
1641
+ default:
1642
+ break;
1643
+ }
1644
+ return size;
1645
+ }
1646
+
1647
+ /**
1648
+ * <group_type>.thread_rank()
1649
+ *
1650
+ * Returns the linearized rank of the calling thread along the interval [0, size()).
1651
+ */
1652
+ _CG_QUALIFIER unsigned long long thread_group::thread_rank() const
1653
+ {
1654
+ unsigned long long rank = 0;
1655
+ switch (_data.group.type) {
1656
+ case details::coalesced_group_id:
1657
+ rank = cooperative_groups::thread_rank(*static_cast<const coalesced_group*>(this));
1658
+ break;
1659
+ case details::thread_block_id:
1660
+ rank = cooperative_groups::thread_rank(*static_cast<const thread_block*>(this));
1661
+ break;
1662
+ case details::grid_group_id:
1663
+ rank = cooperative_groups::thread_rank(*static_cast<const grid_group*>(this));
1664
+ break;
1665
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1666
+ case details::multi_grid_group_id:
1667
+ rank = cooperative_groups::thread_rank(*static_cast<const multi_grid_group*>(this));
1668
+ break;
1669
+ #endif
1670
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1671
+ case details::cluster_group_id:
1672
+ rank = cooperative_groups::thread_rank(*static_cast<const cluster_group*>(this));
1673
+ break;
1674
+ #endif
1675
+ default:
1676
+ break;
1677
+ }
1678
+ return rank;
1679
+ }
1680
+
1681
+ _CG_END_NAMESPACE
1682
+
1683
+ #include <cooperative_groups/details/partitioning.h>
1684
+ #if (!defined(_MSC_VER) || defined(_WIN64))
1685
+ # include <cooperative_groups/details/invoke.h>
1686
+ #endif
1687
+
1688
+ # endif /* ! (__cplusplus, __CUDACC__) */
1689
+
1690
+ #endif /* !_COOPERATIVE_GROUPS_H_ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h ADDED
@@ -0,0 +1,348 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CU_COMPLEX_H_)
51
+ #define CU_COMPLEX_H_
52
+
53
+ #if !defined(__CUDACC_RTC__)
54
+ #if defined(__GNUC__)
55
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)))
56
+ #pragma GCC diagnostic ignored "-Wunused-function"
57
+ #endif
58
+ #endif
59
+ #endif
60
+
61
+ /* When including a C header file in C++ code, extern "C" is required.
62
+ * But the standard QNX headers already contain their own extern "C" ifdef when compiling C++ code,
63
+ * and extern "C" cannot be nested.
64
+ * Hence keep the header out of the extern "C" block.
65
+ */
66
+
67
+ #if !defined(__CUDACC__)
68
+ #include <math.h> /* import fabsf, sqrt */
69
+ #endif /* !defined(__CUDACC__) */
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif /* __cplusplus */
74
+
75
+ #include "vector_types.h"
76
+
77
+ typedef float2 cuFloatComplex;
78
+
79
+ __host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x)
80
+ {
81
+ return x.x;
82
+ }
83
+
84
+ __host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x)
85
+ {
86
+ return x.y;
87
+ }
88
+
89
+ __host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex
90
+ (float r, float i)
91
+ {
92
+ cuFloatComplex res;
93
+ res.x = r;
94
+ res.y = i;
95
+ return res;
96
+ }
97
+
98
+ __host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x)
99
+ {
100
+ return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x));
101
+ }
102
+ __host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x,
103
+ cuFloatComplex y)
104
+ {
105
+ return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y),
106
+ cuCimagf(x) + cuCimagf(y));
107
+ }
108
+
109
+ __host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x,
110
+ cuFloatComplex y)
111
+ {
112
+ return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y),
113
+ cuCimagf(x) - cuCimagf(y));
114
+ }
115
+
116
+ /* This implementation could suffer from intermediate overflow even though
117
+ * the final result would be in range. However, various implementations do
118
+ * not guard against this (presumably to avoid losing performance), so we
119
+ * don't do it either to stay competitive.
120
+ */
121
+ __host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x,
122
+ cuFloatComplex y)
123
+ {
124
+ cuFloatComplex prod;
125
+ prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) -
126
+ (cuCimagf(x) * cuCimagf(y)),
127
+ (cuCrealf(x) * cuCimagf(y)) +
128
+ (cuCimagf(x) * cuCrealf(y)));
129
+ return prod;
130
+ }
131
+
132
+ /* This implementation guards against intermediate underflow and overflow
133
+ * by scaling. Such guarded implementations are usually the default for
134
+ * complex library implementations, with some also offering an unguarded,
135
+ * faster version.
136
+ */
137
+ __host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x,
138
+ cuFloatComplex y)
139
+ {
140
+ cuFloatComplex quot;
141
+ float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y));
142
+ float oos = 1.0f / s;
143
+ float ars = cuCrealf(x) * oos;
144
+ float ais = cuCimagf(x) * oos;
145
+ float brs = cuCrealf(y) * oos;
146
+ float bis = cuCimagf(y) * oos;
147
+ s = (brs * brs) + (bis * bis);
148
+ oos = 1.0f / s;
149
+ quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos,
150
+ ((ais * brs) - (ars * bis)) * oos);
151
+ return quot;
152
+ }
153
+
154
+ /*
155
+ * We would like to call hypotf(), but it's not available on all platforms.
156
+ * This discrete implementation guards against intermediate underflow and
157
+ * overflow by scaling. Otherwise we would lose half the exponent range.
158
+ * There are various ways of doing guarded computation. For now chose the
159
+ * simplest and fastest solution, however this may suffer from inaccuracies
160
+ * if sqrt and division are not IEEE compliant.
161
+ */
162
+ __host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x)
163
+ {
164
+ float a = cuCrealf(x);
165
+ float b = cuCimagf(x);
166
+ float v, w, t;
167
+ a = fabsf(a);
168
+ b = fabsf(b);
169
+ if (a > b) {
170
+ v = a;
171
+ w = b;
172
+ } else {
173
+ v = b;
174
+ w = a;
175
+ }
176
+ t = w / v;
177
+ t = 1.0f + t * t;
178
+ t = v * sqrtf(t);
179
+ if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
180
+ t = v + w;
181
+ }
182
+ return t;
183
+ }
184
+
185
+ /* Double precision */
186
+ typedef double2 cuDoubleComplex;
187
+
188
+ __host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x)
189
+ {
190
+ return x.x;
191
+ }
192
+
193
+ __host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x)
194
+ {
195
+ return x.y;
196
+ }
197
+
198
+ __host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex
199
+ (double r, double i)
200
+ {
201
+ cuDoubleComplex res;
202
+ res.x = r;
203
+ res.y = i;
204
+ return res;
205
+ }
206
+
207
+ __host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x)
208
+ {
209
+ return make_cuDoubleComplex (cuCreal(x), -cuCimag(x));
210
+ }
211
+
212
+ __host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x,
213
+ cuDoubleComplex y)
214
+ {
215
+ return make_cuDoubleComplex (cuCreal(x) + cuCreal(y),
216
+ cuCimag(x) + cuCimag(y));
217
+ }
218
+
219
+ __host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x,
220
+ cuDoubleComplex y)
221
+ {
222
+ return make_cuDoubleComplex (cuCreal(x) - cuCreal(y),
223
+ cuCimag(x) - cuCimag(y));
224
+ }
225
+
226
+ /* This implementation could suffer from intermediate overflow even though
227
+ * the final result would be in range. However, various implementations do
228
+ * not guard against this (presumably to avoid losing performance), so we
229
+ * don't do it either to stay competitive.
230
+ */
231
+ __host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x,
232
+ cuDoubleComplex y)
233
+ {
234
+ cuDoubleComplex prod;
235
+ prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) -
236
+ (cuCimag(x) * cuCimag(y)),
237
+ (cuCreal(x) * cuCimag(y)) +
238
+ (cuCimag(x) * cuCreal(y)));
239
+ return prod;
240
+ }
241
+
242
+ /* This implementation guards against intermediate underflow and overflow
243
+ * by scaling. Such guarded implementations are usually the default for
244
+ * complex library implementations, with some also offering an unguarded,
245
+ * faster version.
246
+ */
247
+ __host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x,
248
+ cuDoubleComplex y)
249
+ {
250
+ cuDoubleComplex quot;
251
+ double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y)));
252
+ double oos = 1.0 / s;
253
+ double ars = cuCreal(x) * oos;
254
+ double ais = cuCimag(x) * oos;
255
+ double brs = cuCreal(y) * oos;
256
+ double bis = cuCimag(y) * oos;
257
+ s = (brs * brs) + (bis * bis);
258
+ oos = 1.0 / s;
259
+ quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos,
260
+ ((ais * brs) - (ars * bis)) * oos);
261
+ return quot;
262
+ }
263
+
264
+ /* This implementation guards against intermediate underflow and overflow
265
+ * by scaling. Otherwise we would lose half the exponent range. There are
266
+ * various ways of doing guarded computation. For now chose the simplest
267
+ * and fastest solution, however this may suffer from inaccuracies if sqrt
268
+ * and division are not IEEE compliant.
269
+ */
270
+ __host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x)
271
+ {
272
+ double a = cuCreal(x);
273
+ double b = cuCimag(x);
274
+ double v, w, t;
275
+ a = fabs(a);
276
+ b = fabs(b);
277
+ if (a > b) {
278
+ v = a;
279
+ w = b;
280
+ } else {
281
+ v = b;
282
+ w = a;
283
+ }
284
+ t = w / v;
285
+ t = 1.0 + t * t;
286
+ t = v * sqrt(t);
287
+ if ((v == 0.0) ||
288
+ (v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) {
289
+ t = v + w;
290
+ }
291
+ return t;
292
+ }
293
+
294
+ #if defined(__cplusplus)
295
+ }
296
+ #endif /* __cplusplus */
297
+
298
+ /* aliases */
299
+ typedef cuFloatComplex cuComplex;
300
+ __host__ __device__ static __inline__ cuComplex make_cuComplex (float x,
301
+ float y)
302
+ {
303
+ return make_cuFloatComplex (x, y);
304
+ }
305
+
306
+ /* float-to-double promotion */
307
+ __host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble
308
+ (cuFloatComplex c)
309
+ {
310
+ return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c));
311
+ }
312
+
313
+ __host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat
314
+ (cuDoubleComplex c)
315
+ {
316
+ return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c));
317
+ }
318
+
319
+
320
+ __host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d)
321
+ {
322
+ float real_res;
323
+ float imag_res;
324
+
325
+ real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d);
326
+ imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d);
327
+
328
+ real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res;
329
+ imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res;
330
+
331
+ return make_cuComplex(real_res, imag_res);
332
+ }
333
+
334
+ __host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d)
335
+ {
336
+ double real_res;
337
+ double imag_res;
338
+
339
+ real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d);
340
+ imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d);
341
+
342
+ real_res = -(cuCimag(x) * cuCimag(y)) + real_res;
343
+ imag_res = (cuCimag(x) * cuCreal(y)) + imag_res;
344
+
345
+ return make_cuDoubleComplex(real_res, imag_res);
346
+ }
347
+
348
+ #endif /* !defined(CU_COMPLEX_H_) */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h ADDED
@@ -0,0 +1,659 @@
1
+ /*
2
+ * Copyright 2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAEGL_H
51
+ #define CUDAEGL_H
52
+
53
+ #include "cuda.h"
54
+ #include "EGL/egl.h"
55
+ #include "EGL/eglext.h"
56
+
57
+
58
+ #ifdef CUDA_FORCE_API_VERSION
59
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
60
+ #endif
61
+
62
+ #ifdef __cplusplus
63
+ extern "C" {
64
+ #endif
65
+
66
+ /**
67
+ * \addtogroup CUDA_TYPES
68
+ * @{
69
+ */
70
+
71
+ /**
72
+ * Maximum number of planes per frame
73
+ */
74
+ #define MAX_PLANES 3
75
+
76
+ /**
77
+ * CUDA EglFrame type - array or pointer
78
+ */
79
+ typedef enum CUeglFrameType_enum {
80
+ CU_EGL_FRAME_TYPE_ARRAY = 0, /**< Frame type CUDA array */
81
+ CU_EGL_FRAME_TYPE_PITCH = 1, /**< Frame type pointer */
82
+ } CUeglFrameType;
83
+
84
+ /**
85
+ * Indicates that timeout for ::cuEGLStreamConsumerAcquireFrame is infinite.
86
+ */
87
+ #define CUDA_EGL_INFINITE_TIMEOUT 0xFFFFFFFF
88
+
89
+ /**
90
+ * Resource location flags- sysmem or vidmem
91
+ *
92
+ * For CUDA context on iGPU, since video and system memory are equivalent -
93
+ * these flags will not have an effect on the execution.
94
+ *
95
+ * For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags
96
+ * to give a hint about the desired location.
97
+ *
98
+ * ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory
99
+ * to be accessed by CUDA.
100
+ *
101
+ * ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated
102
+ * video memory to be accessed by CUDA.
103
+ *
104
+ * There may be an additional latency due to new allocation and data migration,
105
+ * if the frame is produced on a different memory.
106
+
107
+ */
108
+ typedef enum CUeglResourceLocationFlags_enum {
109
+ CU_EGL_RESOURCE_LOCATION_SYSMEM = 0x00, /**< Resource location sysmem */
110
+ CU_EGL_RESOURCE_LOCATION_VIDMEM = 0x01 /**< Resource location vidmem */
111
+ } CUeglResourceLocationFlags;
112
+
113
+ /**
114
+ * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
115
+ * Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY
116
+ */
117
+ typedef enum CUeglColorFormat_enum {
118
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0x00, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
119
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = 0x01, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */
120
+ CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 0x02, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
121
+ CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 0x03, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */
122
+ CU_EGL_COLOR_FORMAT_RGB = 0x04, /**< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. */
123
+ CU_EGL_COLOR_FORMAT_BGR = 0x05, /**< R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. */
124
+ CU_EGL_COLOR_FORMAT_ARGB = 0x06, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */
125
+ CU_EGL_COLOR_FORMAT_RGBA = 0x07, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */
126
+ CU_EGL_COLOR_FORMAT_L = 0x08, /**< single luminance channel in one surface. */
127
+ CU_EGL_COLOR_FORMAT_R = 0x09, /**< single color channel in one surface. */
128
+ CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 0x0A, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
129
+ CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 0x0B, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */
130
+ CU_EGL_COLOR_FORMAT_YUYV_422 = 0x0C, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */
131
+ CU_EGL_COLOR_FORMAT_UYVY_422 = 0x0D, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */
132
+ CU_EGL_COLOR_FORMAT_ABGR = 0x0E, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */
133
+ CU_EGL_COLOR_FORMAT_BGRA = 0x0F, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */
134
+ CU_EGL_COLOR_FORMAT_A = 0x10, /**< Alpha color format - one channel in one surface. */
135
+ CU_EGL_COLOR_FORMAT_RG = 0x11, /**< R/G color format - two channels in one surface with GR byte ordering */
136
+ CU_EGL_COLOR_FORMAT_AYUV = 0x12, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */
137
+ CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 0x13, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
138
+ CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 0x14, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
139
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 0x15, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
140
+ CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 0x16, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
141
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 0x17, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
142
+ CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 0x18, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
143
+ CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 0x19, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
144
+ CU_EGL_COLOR_FORMAT_VYUY_ER = 0x1A, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */
145
+ CU_EGL_COLOR_FORMAT_UYVY_ER = 0x1B, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */
146
+ CU_EGL_COLOR_FORMAT_YUYV_ER = 0x1C, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */
147
+ CU_EGL_COLOR_FORMAT_YVYU_ER = 0x1D, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */
148
+ CU_EGL_COLOR_FORMAT_YUV_ER = 0x1E, /**< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */
149
+ CU_EGL_COLOR_FORMAT_YUVA_ER = 0x1F, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */
150
+ CU_EGL_COLOR_FORMAT_AYUV_ER = 0x20, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */
151
+ CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 0x21, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */
152
+ CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 0x22, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
153
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 0x23, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
154
+ CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 0x24, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */
155
+ CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 0x25, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
156
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 0x26, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
157
+ CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 0x27, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */
158
+ CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 0x28, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
159
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 0x29, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
160
+ CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 0x2A, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
161
+ CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 0x2B, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
162
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 0x2C, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
163
+ CU_EGL_COLOR_FORMAT_BAYER_RGGB = 0x2D, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */
164
+ CU_EGL_COLOR_FORMAT_BAYER_BGGR = 0x2E, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */
165
+ CU_EGL_COLOR_FORMAT_BAYER_GRBG = 0x2F, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */
166
+ CU_EGL_COLOR_FORMAT_BAYER_GBRG = 0x30, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */
167
+ CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 0x31, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
168
+ CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 0x32, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
169
+ CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 0x33, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
170
+ CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 0x34, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
171
+ CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 0x35, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
172
+ CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 0x36, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
173
+ CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 0x37, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
174
+ CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 0x38, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
175
+ CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 0x39, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
176
+ CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 0x3A, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
177
+ CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 0x3B, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
178
+ CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 0x3C, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
179
+ CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 0x3D, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
180
+ CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 0x3E, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
181
+ CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 0x3F, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
182
+ CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 0x40, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
183
+ CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 0x41, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
184
+ CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 0x42, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
185
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 0x43, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
186
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 0x44, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */
187
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 0x45, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */
188
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 0x46, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */
189
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 0x47, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */
190
+ CU_EGL_COLOR_FORMAT_BAYER_BCCR = 0x48, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */
191
+ CU_EGL_COLOR_FORMAT_BAYER_RCCB = 0x49, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */
192
+ CU_EGL_COLOR_FORMAT_BAYER_CRBC = 0x4A, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */
193
+ CU_EGL_COLOR_FORMAT_BAYER_CBRC = 0x4B, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */
194
+ CU_EGL_COLOR_FORMAT_BAYER10_CCCC = 0x4C, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
195
+ CU_EGL_COLOR_FORMAT_BAYER12_BCCR = 0x4D, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
196
+ CU_EGL_COLOR_FORMAT_BAYER12_RCCB = 0x4E, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
197
+ CU_EGL_COLOR_FORMAT_BAYER12_CRBC = 0x4F, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
198
+ CU_EGL_COLOR_FORMAT_BAYER12_CBRC = 0x50, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
199
+ CU_EGL_COLOR_FORMAT_BAYER12_CCCC = 0x51, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
200
+ CU_EGL_COLOR_FORMAT_Y = 0x52, /**< Color format for single Y plane. */
201
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = 0x53, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
202
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = 0x54, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
203
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = 0x55, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. */
204
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = 0x56, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height
205
+ = 1/2 Y height. */
206
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = 0x57, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
207
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = 0x58, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
208
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = 0x59, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height
209
+ = 1/2 Y height. */
210
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = 0x5A, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
211
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = 0x5B, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
212
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = 0x5C, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
213
+ CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = 0x5D, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
214
+ CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = 0x5E, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
215
+ CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = 0x5F, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
216
+ CU_EGL_COLOR_FORMAT_Y_ER = 0x60, /**< Extended Range Color format for single Y plane. */
217
+ CU_EGL_COLOR_FORMAT_Y_709_ER = 0x61, /**< Extended Range Color format for single Y plane. */
218
+ CU_EGL_COLOR_FORMAT_Y10_ER = 0x62, /**< Extended Range Color format for single Y10 plane. */
219
+ CU_EGL_COLOR_FORMAT_Y10_709_ER = 0x63, /**< Extended Range Color format for single Y10 plane. */
220
+ CU_EGL_COLOR_FORMAT_Y12_ER = 0x64, /**< Extended Range Color format for single Y12 plane. */
221
+ CU_EGL_COLOR_FORMAT_Y12_709_ER = 0x65, /**< Extended Range Color format for single Y12 plane. */
222
+ CU_EGL_COLOR_FORMAT_YUVA = 0x66, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */
223
+ CU_EGL_COLOR_FORMAT_YUV = 0x67, /**< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */
224
+ CU_EGL_COLOR_FORMAT_YVYU = 0x68, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */
225
+ CU_EGL_COLOR_FORMAT_VYUY = 0x69, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */
226
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = 0x6A, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
227
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = 0x6B, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
228
+ CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = 0x6C, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
229
+ CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = 0x6D, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
230
+ CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = 0x6E, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
231
+ CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = 0x6F, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
232
+ CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = 0x70, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
233
+ CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = 0x71, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
234
+ CU_EGL_COLOR_FORMAT_MAX
235
+ } CUeglColorFormat;
236
+
237
+ /**
238
+ * CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
239
+ *
240
+ * Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.
241
+ */
242
+ typedef struct CUeglFrame_st {
243
+ union {
244
+ CUarray pArray[MAX_PLANES]; /**< Array of CUarray corresponding to each plane*/
245
+ void* pPitch[MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/
246
+ } frame;
247
+ unsigned int width; /**< Width of first plane */
248
+ unsigned int height; /**< Height of first plane */
249
+ unsigned int depth; /**< Depth of first plane */
250
+ unsigned int pitch; /**< Pitch of first plane */
251
+ unsigned int planeCount; /**< Number of planes */
252
+ unsigned int numChannels; /**< Number of channels for the plane */
253
+ CUeglFrameType frameType; /**< Array or Pitch */
254
+ CUeglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/
255
+ CUarray_format cuFormat; /**< CUDA Array Format*/
256
+ } CUeglFrame_v1;
257
+ typedef CUeglFrame_v1 CUeglFrame;
258
+
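To make the descriptor concrete, here is a minimal sketch (not from the header) that fills a CUeglFrame for a single-plane, pitch-linear, 8-bit RGBA image backed by a fresh device allocation; cuMemAllocPitch and CU_AD_FORMAT_UNSIGNED_INT8 come from cuda.h, and the chosen color format is just one valid combination.

    #include <cudaEGL.h>
    #include <stdint.h>

    /* Describe a width x height RGBA8 pitch-linear frame for the EGL interop APIs.
     * Assumes cuInit() and a current CUDA context have been set up by the caller. */
    static CUresult make_rgba_frame(CUeglFrame *f, unsigned int width, unsigned int height)
    {
        CUdeviceptr ptr;
        size_t pitch;
        CUresult rc = cuMemAllocPitch(&ptr, &pitch, (size_t)width * 4, height, 4);
        if (rc != CUDA_SUCCESS) return rc;

        f->frame.pPitch[0] = (void *)(uintptr_t)ptr;      /* single plane */
        f->width          = width;
        f->height         = height;
        f->depth          = 1;
        f->pitch          = (unsigned int)pitch;
        f->planeCount     = 1;
        f->numChannels    = 4;
        f->frameType      = CU_EGL_FRAME_TYPE_PITCH;
        f->eglColorFormat = CU_EGL_COLOR_FORMAT_ABGR;     /* RGBA byte ordering */
        f->cuFormat       = CU_AD_FORMAT_UNSIGNED_INT8;
        return CUDA_SUCCESS;
    }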
259
+ /**
260
+ * CUDA EGLStream Connection
261
+ */
262
+ typedef struct CUeglStreamConnection_st* CUeglStreamConnection;
263
+
264
+ /** @} */ /* END CUDA_TYPES */
265
+
266
+ /**
267
+ * \file cudaEGL.h
268
+ * \brief Header file for the EGL interoperability functions of the
269
+ * low-level CUDA driver application programming interface.
270
+ */
271
+
272
+ /**
273
+ * \defgroup CUDA_EGL EGL Interoperability
274
+ * \ingroup CUDA_DRIVER
275
+ *
276
+ * ___MANBRIEF___ EGL interoperability functions of the low-level CUDA
277
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
278
+ *
279
+ * This section describes the EGL interoperability functions of the
280
+ * low-level CUDA driver application programming interface.
281
+ *
282
+ * @{
283
+ */
284
+
285
+ /**
286
+ * \brief Registers an EGL image
287
+ *
288
+ * Registers the EGLImageKHR specified by \p image for access by
289
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
290
+ * Additional Mapping/Unmapping is not required for the registered resource and
291
+ * ::cuGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource.
292
+ *
293
+ * The application will be responsible for synchronizing access to shared objects.
294
+ * The application must ensure that any pending operations which access the objects have completed
295
+ * before passing control to CUDA. This may be accomplished by issuing and waiting for
296
+ * glFinish command on all GLcontexts (for OpenGL and likewise for other APIs).
297
+ * The application will be also responsible for ensuring that any pending operation on the
298
+ * registered CUDA resource has completed prior to executing subsequent commands in other APIs
299
+ * accessing the same memory objects.
300
+ * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).
301
+ *
302
+ * The surface's intended usage is specified using \p flags, as follows:
303
+ *
304
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
305
+ * resource will be used. It is therefore assumed that this resource will be
306
+ * read from and written to by CUDA. This is the default value.
307
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
308
+ * will not write to this resource.
309
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
310
+ * CUDA will not read from this resource and will write over the
311
+ * entire contents of the resource, so none of the data previously
312
+ * stored in the resource will be preserved.
313
+ *
314
+ * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer.
315
+ * typedef void* EGLImageKHR
316
+ *
317
+ * \param pCudaResource - Pointer to the returned object handle
318
+ * \param image - An EGLImageKHR image which can be used to create target resource.
319
+ * \param flags - Map flags
320
+ *
321
+ * \return
322
+ * ::CUDA_SUCCESS,
323
+ * ::CUDA_ERROR_INVALID_HANDLE,
324
+ * ::CUDA_ERROR_ALREADY_MAPPED,
325
+ * ::CUDA_ERROR_INVALID_CONTEXT,
326
+ *
327
+ * \sa ::cuGraphicsEGLRegisterImage, ::cuGraphicsUnregisterResource,
328
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
329
+ * ::cuGraphicsUnmapResources,
330
+ * ::cudaGraphicsEGLRegisterImage
331
+ */
332
+ CUresult CUDAAPI cuGraphicsEGLRegisterImage(CUgraphicsResource *pCudaResource, EGLImageKHR image, unsigned int flags);
333
+
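A hedged usage sketch for the call above, assuming an EGLImageKHR obtained elsewhere by the application: register the image, query its frame description, then unregister. Error handling is reduced to early returns; cuGraphicsUnregisterResource comes from cuda.h.

    #include <cudaEGL.h>

    static CUresult inspect_egl_image(EGLImageKHR image)
    {
        CUgraphicsResource res;
        CUeglFrame frame;
        CUresult rc = cuGraphicsEGLRegisterImage(&res, image,
                                                 CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
        if (rc != CUDA_SUCCESS) return rc;

        /* No map/unmap step is needed for EGL-registered resources (see above). */
        rc = cuGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0);
        if (rc == CUDA_SUCCESS) {
            /* frame.width, frame.height, frame.planeCount and frame.eglColorFormat
             * now describe the imported image. */
        }
        (void)cuGraphicsUnregisterResource(res);
        return rc;
    }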
334
+ /**
335
+ * \brief Connect CUDA to EGLStream as a consumer.
336
+ *
337
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream.
338
+ *
339
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
340
+ * API to another.
341
+ *
342
+ * \param conn - Pointer to the returned connection handle
343
+ * \param stream - EGLStreamKHR handle
344
+ *
345
+ * \return
346
+ * ::CUDA_SUCCESS,
347
+ * ::CUDA_ERROR_INVALID_HANDLE,
348
+ * ::CUDA_ERROR_INVALID_CONTEXT,
349
+ *
350
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
351
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
352
+ * ::cudaEGLStreamConsumerConnect
353
+ */
354
+ CUresult CUDAAPI cuEGLStreamConsumerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream);
355
+
356
+ /**
357
+ * \brief Connect CUDA to EGLStream as a consumer with given flags.
358
+ *
359
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags.
360
+ *
361
+ * The flags specify whether the consumer wants to access frames from system memory or video memory.
362
+ * Default is ::CU_EGL_RESOURCE_LOCATION_VIDMEM.
363
+ *
364
+ * \param conn - Pointer to the returned connection handle
365
+ * \param stream - EGLStreamKHR handle
366
+ * \param flags - Flags denote intended location - system or video.
367
+ *
368
+ * \return
369
+ * ::CUDA_SUCCESS,
370
+ * ::CUDA_ERROR_INVALID_HANDLE,
371
+ * ::CUDA_ERROR_INVALID_CONTEXT,
372
+ *
373
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
374
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
375
+ * ::cudaEGLStreamConsumerConnectWithFlags
376
+ */
377
+
378
+ CUresult CUDAAPI cuEGLStreamConsumerConnectWithFlags(CUeglStreamConnection *conn, EGLStreamKHR stream, unsigned int flags);
379
+
380
+ /**
381
+ * \brief Disconnect CUDA as a consumer from EGLStream.
382
+ *
383
+ * Disconnect CUDA as a consumer from EGLStreamKHR.
384
+ *
385
+ * \param conn - Connection to disconnect.
386
+ *
387
+ * \return
388
+ * ::CUDA_SUCCESS,
389
+ * ::CUDA_ERROR_INVALID_HANDLE,
390
+ * ::CUDA_ERROR_INVALID_CONTEXT,
391
+ *
392
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
393
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
394
+ * ::cudaEGLStreamConsumerDisconnect
395
+ */
396
+ CUresult CUDAAPI cuEGLStreamConsumerDisconnect(CUeglStreamConnection *conn);
397
+
398
+ /**
399
+ * \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
400
+ *
401
+ * Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented
402
+ * by the producer unless explicitly disabled by setting EGL_SUPPORT_REUSE_NV flag to EGL_FALSE
403
+ * during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE.
404
+ * ::cuGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get
405
+ * ::CUeglFrame.
406
+ *
407
+ * \param conn - Connection on which to acquire
408
+ * \param pCudaResource - CUDA resource on which the stream frame will be mapped for use.
409
+ * \param pStream - CUDA stream for synchronization and any data migrations
410
+ * implied by ::CUeglResourceLocationFlags.
411
+ * \param timeout - Desired timeout in usec for a new frame to be acquired.
412
+ * If set as ::CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely.
413
+ * After timeout occurs CUDA consumer tries to acquire an old frame
414
+ * if available and EGL_SUPPORT_REUSE_NV flag is set.
415
+ *
416
+ * \return
417
+ * ::CUDA_SUCCESS,
418
+ * ::CUDA_ERROR_INVALID_HANDLE,
419
+ * ::CUDA_ERROR_LAUNCH_TIMEOUT,
420
+ *
421
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
422
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
423
+ * ::cudaEGLStreamConsumerAcquireFrame
424
+ */
425
+ CUresult CUDAAPI cuEGLStreamConsumerAcquireFrame(CUeglStreamConnection *conn,
426
+ CUgraphicsResource *pCudaResource, CUstream *pStream, unsigned int timeout);
427
+ /**
428
+ * \brief Releases the last frame acquired from the EGLStream.
429
+ *
430
+ * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
431
+ * If the EGL_SUPPORT_REUSE_NV flag is set to EGL_TRUE at the time of EGL stream creation,
432
+ * this API doesn't release the last frame acquired on the EGLStream.
433
+ * By default, EGLStream is created with this flag set to EGL_TRUE.
434
+ *
435
+ * \param conn - Connection on which to release
436
+ * \param pCudaResource - CUDA resource whose corresponding frame is to be released
437
+ * \param pStream - CUDA stream on which release will be done.
438
+ *
439
+ * \return
440
+ * ::CUDA_SUCCESS,
441
+ * ::CUDA_ERROR_INVALID_HANDLE,
442
+ *
443
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
444
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
445
+ * ::cudaEGLStreamConsumerReleaseFrame
446
+ */
447
+ CUresult CUDAAPI cuEGLStreamConsumerReleaseFrame(CUeglStreamConnection *conn,
448
+ CUgraphicsResource pCudaResource, CUstream *pStream);
449
+
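Taken together, the consumer-side entry points above compose into an acquire/process/release loop. A minimal sketch, assuming a current CUDA context and an EGLStreamKHR that was created and connected to a producer elsewhere; the kernel work is elided.

    #include <cudaEGL.h>

    static CUresult consume_frames(EGLStreamKHR stream, int nFrames)
    {
        CUeglStreamConnection conn;
        CUstream cuStream = 0;                       /* default stream */
        CUresult rc = cuEGLStreamConsumerConnect(&conn, stream);
        if (rc != CUDA_SUCCESS) return rc;

        for (int i = 0; i < nFrames; ++i) {
            CUgraphicsResource res;
            rc = cuEGLStreamConsumerAcquireFrame(&conn, &res, &cuStream, 16000 /* usec */);
            if (rc != CUDA_SUCCESS) break;

            CUeglFrame frame;
            rc = cuGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0);
            /* ... launch kernels that read `frame` on cuStream ... */

            rc = cuEGLStreamConsumerReleaseFrame(&conn, res, &cuStream);
            if (rc != CUDA_SUCCESS) break;
        }
        (void)cuEGLStreamConsumerDisconnect(&conn);
        return rc;
    }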
450
+ /**
451
+ * \brief Connect CUDA to EGLStream as a producer.
452
+ *
453
+ * Connect CUDA as a producer to EGLStreamKHR specified by \p stream.
454
+ *
455
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
456
+ * API to another.
457
+ *
458
+ * \param conn - Pointer to the returned connection handle
459
+ * \param stream - EGLStreamKHR handle
460
+ * \param width - width of the image to be submitted to the stream
461
+ * \param height - height of the image to be submitted to the stream
462
+ *
463
+ * \return
464
+ * ::CUDA_SUCCESS,
465
+ * ::CUDA_ERROR_INVALID_HANDLE,
466
+ * ::CUDA_ERROR_INVALID_CONTEXT,
467
+ *
468
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
469
+ * ::cuEGLStreamProducerPresentFrame,
470
+ * ::cudaEGLStreamProducerConnect
471
+ */
472
+ CUresult CUDAAPI cuEGLStreamProducerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream,
473
+ EGLint width, EGLint height);
474
+
475
+ /**
476
+ * \brief Disconnect CUDA as a producer from EGLStream.
477
+ *
478
+ * Disconnect CUDA as a producer from EGLStreamKHR.
479
+ *
480
+ * \param conn - Connection to disconnect.
481
+ *
482
+ * \return
483
+ * ::CUDA_SUCCESS,
484
+ * ::CUDA_ERROR_INVALID_HANDLE,
485
+ * ::CUDA_ERROR_INVALID_CONTEXT,
486
+ *
487
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
488
+ * ::cuEGLStreamProducerPresentFrame,
489
+ * ::cudaEGLStreamProducerDisconnect
490
+ */
491
+ CUresult CUDAAPI cuEGLStreamProducerDisconnect(CUeglStreamConnection *conn);
492
+
493
+ /**
494
+ * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
495
+ *
496
+ * When a frame is presented by the producer, it gets associated with the EGLStream
497
+ * and thus it is illegal to free the frame before the producer is disconnected.
498
+ * If a frame is freed and reused it may lead to undefined behavior.
499
+ *
500
+ * If producer and consumer are on different GPUs (iGPU and dGPU) then frametype
501
+ * ::CU_EGL_FRAME_TYPE_ARRAY is not supported. ::CU_EGL_FRAME_TYPE_PITCH can be used for
502
+ * such cross-device applications.
503
+ *
504
+ * The ::CUeglFrame is defined as:
505
+ * \code
506
+ * typedef struct CUeglFrame_st {
507
+ * union {
508
+ * CUarray pArray[MAX_PLANES];
509
+ * void* pPitch[MAX_PLANES];
510
+ * } frame;
511
+ * unsigned int width;
512
+ * unsigned int height;
513
+ * unsigned int depth;
514
+ * unsigned int pitch;
515
+ * unsigned int planeCount;
516
+ * unsigned int numChannels;
517
+ * CUeglFrameType frameType;
518
+ * CUeglColorFormat eglColorFormat;
519
+ * CUarray_format cuFormat;
520
+ * } CUeglFrame;
521
+ * \endcode
522
+ *
523
+ * For ::CUeglFrame of type ::CU_EGL_FRAME_TYPE_PITCH, the application may present sub-region of a memory
524
+ * allocation. In that case, the pitched pointer will specify the start address of the sub-region in
525
+ * the allocation and corresponding ::CUeglFrame fields will specify the dimensions of the sub-region.
526
+ *
527
+ * \param conn - Connection on which to present the CUDA array
528
+ * \param eglframe - CUDA Eglstream Producer Frame handle to be sent to the consumer over EglStream.
529
+ * \param pStream - CUDA stream on which to present the frame.
530
+ *
531
+ * \return
532
+ * ::CUDA_SUCCESS,
533
+ * ::CUDA_ERROR_INVALID_HANDLE,
534
+ *
535
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
536
+ * ::cuEGLStreamProducerReturnFrame,
537
+ * ::cudaEGLStreamProducerPresentFrame
538
+ */
539
+ CUresult CUDAAPI cuEGLStreamProducerPresentFrame(CUeglStreamConnection *conn,
540
+ CUeglFrame eglframe, CUstream *pStream);
541
+
542
+ /**
543
+ * \brief Return the CUDA eglFrame to the EGLStream released by the consumer.
544
+ *
545
+ * This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not
546
+ * returned a frame to EGL stream. If timeout is returned the application can retry.
547
+ *
548
+ * \param conn - Connection on which to return
549
+ * \param eglframe - CUDA Eglstream Producer Frame handle returned from the consumer over EglStream.
550
+ * \param pStream - CUDA stream on which to return the frame.
551
+ *
552
+ * \return
553
+ * ::CUDA_SUCCESS,
554
+ * ::CUDA_ERROR_INVALID_HANDLE,
555
+ * ::CUDA_ERROR_LAUNCH_TIMEOUT
556
+ *
557
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
558
+ * ::cuEGLStreamProducerPresentFrame,
559
+ * ::cudaEGLStreamProducerReturnFrame
560
+ */
561
+ CUresult CUDAAPI cuEGLStreamProducerReturnFrame(CUeglStreamConnection *conn,
562
+ CUeglFrame *eglframe, CUstream *pStream);
563
+
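And the matching producer side, sketched under the assumption that the CUeglFrame was filled in beforehand (for example as a CU_EGL_FRAME_TYPE_PITCH frame) and that its dimensions match the width/height passed at connect time.

    #include <cudaEGL.h>

    static CUresult produce_one(EGLStreamKHR stream, CUeglFrame frame,
                                EGLint width, EGLint height)
    {
        CUeglStreamConnection conn;
        CUstream cuStream = 0;
        CUresult rc = cuEGLStreamProducerConnect(&conn, stream, width, height);
        if (rc != CUDA_SUCCESS) return rc;

        rc = cuEGLStreamProducerPresentFrame(&conn, frame, &cuStream);
        if (rc == CUDA_SUCCESS) {
            /* Poll until the consumer hands the frame back; only then may it be reused or freed. */
            CUeglFrame returned;
            do {
                rc = cuEGLStreamProducerReturnFrame(&conn, &returned, &cuStream);
            } while (rc == CUDA_ERROR_LAUNCH_TIMEOUT);
        }
        (void)cuEGLStreamProducerDisconnect(&conn);
        return rc;
    }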
564
+ /**
565
+ * \brief Get an eglFrame through which to access a registered EGL graphics resource.
566
+ *
567
+ * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
568
+ * \p resource may be accessed.
569
+ * This API can only be called for registered EGL graphics resources.
570
+ *
571
+ * The ::CUeglFrame is defined as:
572
+ * \code
573
+ * typedef struct CUeglFrame_st {
574
+ * union {
575
+ * CUarray pArray[MAX_PLANES];
576
+ * void* pPitch[MAX_PLANES];
577
+ * } frame;
578
+ * unsigned int width;
579
+ * unsigned int height;
580
+ * unsigned int depth;
581
+ * unsigned int pitch;
582
+ * unsigned int planeCount;
583
+ * unsigned int numChannels;
584
+ * CUeglFrameType frameType;
585
+ * CUeglColorFormat eglColorFormat;
586
+ * CUarray_format cuFormat;
587
+ * } CUeglFrame;
588
+ * \endcode
589
+ *
590
+ * If \p resource is not registered then ::CUDA_ERROR_NOT_MAPPED is returned.
591
+ *
592
+ * \param eglFrame - Returned eglFrame.
593
+ * \param resource - Registered resource to access.
594
+ * \param index - Index for cubemap surfaces.
595
+ * \param mipLevel - Mipmap level for the subresource to access.
596
+ *
597
+ * \return
598
+ * ::CUDA_SUCCESS,
599
+ * ::CUDA_ERROR_DEINITIALIZED,
600
+ * ::CUDA_ERROR_NOT_INITIALIZED,
601
+ * ::CUDA_ERROR_INVALID_CONTEXT,
602
+ * ::CUDA_ERROR_INVALID_VALUE,
603
+ * ::CUDA_ERROR_INVALID_HANDLE,
604
+ * ::CUDA_ERROR_NOT_MAPPED
605
+ *
606
+ * \sa
607
+ * ::cuGraphicsMapResources,
608
+ * ::cuGraphicsSubResourceGetMappedArray,
609
+ * ::cuGraphicsResourceGetMappedPointer,
610
+ * ::cudaGraphicsResourceGetMappedEglFrame
611
+ */
612
+ CUresult CUDAAPI cuGraphicsResourceGetMappedEglFrame(CUeglFrame* eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel);
613
+
614
+ /**
615
+ * \brief Creates an event from EGLSync object
616
+ *
617
+ * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
618
+ * via \p flags. Valid flags include:
619
+ * - ::CU_EVENT_DEFAULT: Default event creation flag.
620
+ * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking
621
+ * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on
622
+ * an event created with this flag will block until the event has actually
623
+ * been completed.
624
+ *
625
+ * Once the \p eglSync gets destroyed, ::cuEventDestroy is the only API
626
+ * that can be invoked on the event.
627
+ *
628
+ * ::cuEventRecord and TimingData are not supported for events created from EGLSync.
629
+ *
630
+ * The EGLSyncKHR is an opaque handle to an EGL sync object.
631
+ * typedef void* EGLSyncKHR
632
+ *
633
+ * \param phEvent - Returns newly created event
634
+ * \param eglSync - Opaque handle to EGLSync object
635
+ * \param flags - Event creation flags
636
+ *
637
+ * \return
638
+ * ::CUDA_SUCCESS,
639
+ * ::CUDA_ERROR_DEINITIALIZED,
640
+ * ::CUDA_ERROR_NOT_INITIALIZED,
641
+ * ::CUDA_ERROR_INVALID_CONTEXT,
642
+ * ::CUDA_ERROR_INVALID_VALUE,
643
+ * ::CUDA_ERROR_OUT_OF_MEMORY
644
+ *
645
+ * \sa
646
+ * ::cuEventQuery,
647
+ * ::cuEventSynchronize,
648
+ * ::cuEventDestroy
649
+ */
650
+ CUresult CUDAAPI cuEventCreateFromEGLSync(CUevent *phEvent, EGLSyncKHR eglSync, unsigned int flags);
651
+
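A short sketch of waiting on an EGL fence from CUDA; the EGLSyncKHR is assumed to have been created by the application (for example via the EGL_KHR_fence_sync extension), and cuEventSynchronize/cuEventDestroy come from cuda.h.

    #include <cudaEGL.h>

    static CUresult wait_on_egl_sync(EGLSyncKHR sync)
    {
        CUevent ev;
        CUresult rc = cuEventCreateFromEGLSync(&ev, sync, CU_EVENT_BLOCKING_SYNC);
        if (rc != CUDA_SUCCESS) return rc;

        rc = cuEventSynchronize(ev);   /* returns once the EGL sync object has signaled */
        (void)cuEventDestroy(ev);      /* the only call allowed once the eglSync is destroyed */
        return rc;
    }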
652
+ /** @} */ /* END CUDA_EGL */
653
+
654
+ #ifdef __cplusplus
655
+ };
656
+ #endif
657
+
658
+ #endif
659
+
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h ADDED
@@ -0,0 +1,96 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAEGLTYPEDEFS_H
51
+ #define CUDAEGLTYPEDEFS_H
52
+
53
+ #include <cudaEGL.h>
54
+
55
+ #ifdef __cplusplus
56
+ extern "C" {
57
+ #endif // __cplusplus
58
+
59
+ /*
60
+ * Macros for the latest version for each driver function in cudaEGL.h
61
+ */
62
+ #define PFN_cuGraphicsEGLRegisterImage PFN_cuGraphicsEGLRegisterImage_v7000
63
+ #define PFN_cuEGLStreamConsumerConnect PFN_cuEGLStreamConsumerConnect_v7000
64
+ #define PFN_cuEGLStreamConsumerConnectWithFlags PFN_cuEGLStreamConsumerConnectWithFlags_v8000
65
+ #define PFN_cuEGLStreamConsumerDisconnect PFN_cuEGLStreamConsumerDisconnect_v7000
66
+ #define PFN_cuEGLStreamConsumerAcquireFrame PFN_cuEGLStreamConsumerAcquireFrame_v7000
67
+ #define PFN_cuEGLStreamConsumerReleaseFrame PFN_cuEGLStreamConsumerReleaseFrame_v7000
68
+ #define PFN_cuEGLStreamProducerConnect PFN_cuEGLStreamProducerConnect_v7000
69
+ #define PFN_cuEGLStreamProducerDisconnect PFN_cuEGLStreamProducerDisconnect_v7000
70
+ #define PFN_cuEGLStreamProducerPresentFrame PFN_cuEGLStreamProducerPresentFrame_v7000
71
+ #define PFN_cuEGLStreamProducerReturnFrame PFN_cuEGLStreamProducerReturnFrame_v7000
72
+ #define PFN_cuGraphicsResourceGetMappedEglFrame PFN_cuGraphicsResourceGetMappedEglFrame_v7000
73
+ #define PFN_cuEventCreateFromEGLSync PFN_cuEventCreateFromEGLSync_v9000
74
+
75
+
76
+ /**
77
+ * Type definitions for functions defined in cudaEGL.h
78
+ */
79
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsEGLRegisterImage_v7000)(CUgraphicsResource CUDAAPI *pCudaResource, EGLImageKHR image, unsigned int flags);
80
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream);
81
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnectWithFlags_v8000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, unsigned int flags);
82
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
83
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerAcquireFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource CUDAAPI *pCudaResource, CUstream CUDAAPI *pStream, unsigned int timeout);
84
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerReleaseFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource pCudaResource, CUstream CUDAAPI *pStream);
85
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, EGLint width, EGLint height);
86
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
87
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerPresentFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 eglframe, CUstream CUDAAPI *pStream);
88
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerReturnFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 CUDAAPI *eglframe, CUstream CUDAAPI *pStream);
89
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedEglFrame_v7000)(CUeglFrame_v1 CUDAAPI *eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel);
90
+ typedef CUresult (CUDAAPI *PFN_cuEventCreateFromEGLSync_v9000)(CUevent CUDAAPI *phEvent, EGLSyncKHR eglSync, unsigned int flags);
91
+
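These typedefs exist so that applications can hold versioned driver entry points looked up at run time. A hedged sketch using cuGetProcAddress from cuda.h: the 4-argument form shown here is the CUDA 11.x signature (CUDA 12 adds a fifth CUdriverProcAddressQueryResult* argument), and requesting version 7000 simply asks for the ABI introduced with CUDA 7.0.

    #include <cudaEGLTypedefs.h>

    /* Resolve the _v7000 entry point for cuEGLStreamConsumerConnect at run time.
     * NOTE: 4-argument CUDA 11.x form of cuGetProcAddress; adjust for CUDA 12. */
    static PFN_cuEGLStreamConsumerConnect_v7000 load_consumer_connect(void)
    {
        void *fn = NULL;
        if (cuGetProcAddress("cuEGLStreamConsumerConnect", &fn, 7000,
                             CU_GET_PROC_ADDRESS_DEFAULT) != CUDA_SUCCESS)
            return NULL;
        return (PFN_cuEGLStreamConsumerConnect_v7000)fn;
    }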
92
+ #ifdef __cplusplus
93
+ }
94
+ #endif // __cplusplus
95
+
96
+ #endif // file guard
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h ADDED
@@ -0,0 +1,608 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAGL_H
51
+ #define CUDAGL_H
52
+
53
+ #include <cuda.h>
54
+ #include <GL/gl.h>
55
+
56
+ #if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
57
+ #define __CUDA_DEPRECATED
58
+ #elif defined(_MSC_VER)
59
+ #define __CUDA_DEPRECATED __declspec(deprecated)
60
+ #elif defined(__GNUC__)
61
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
62
+ #else
63
+ #define __CUDA_DEPRECATED
64
+ #endif
65
+
66
+ #ifdef CUDA_FORCE_API_VERSION
67
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
68
+ #endif
69
+
70
+ #if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
71
+ #define __CUDA_API_PER_THREAD_DEFAULT_STREAM
72
+ #define __CUDA_API_PTDS(api) api ## _ptds
73
+ #define __CUDA_API_PTSZ(api) api ## _ptsz
74
+ #else
75
+ #define __CUDA_API_PTDS(api) api
76
+ #define __CUDA_API_PTSZ(api) api
77
+ #endif
78
+
79
+ #define cuGLCtxCreate cuGLCtxCreate_v2
80
+ #define cuGLMapBufferObject __CUDA_API_PTDS(cuGLMapBufferObject_v2)
81
+ #define cuGLMapBufferObjectAsync __CUDA_API_PTSZ(cuGLMapBufferObjectAsync_v2)
82
+ #define cuGLGetDevices cuGLGetDevices_v2
83
+
84
+ #ifdef __cplusplus
85
+ extern "C" {
86
+ #endif
87
+
88
+ /**
89
+ * \file cudaGL.h
90
+ * \brief Header file for the OpenGL interoperability functions of the
91
+ * low-level CUDA driver application programming interface.
92
+ */
93
+
94
+ /**
95
+ * \defgroup CUDA_GL OpenGL Interoperability
96
+ * \ingroup CUDA_DRIVER
97
+ *
98
+ * ___MANBRIEF___ OpenGL interoperability functions of the low-level CUDA
99
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
100
+ *
101
+ * This section describes the OpenGL interoperability functions of the
102
+ * low-level CUDA driver application programming interface. Note that mapping
103
+ * of OpenGL resources is performed with the graphics API agnostic, resource
104
+ * mapping interface described in \ref CUDA_GRAPHICS "Graphics Interoperability".
105
+ *
106
+ * @{
107
+ */
108
+
109
+ #if defined(_WIN32)
110
+ #if !defined(WGL_NV_gpu_affinity)
111
+ typedef void* HGPUNV;
112
+ #endif
113
+ #endif /* _WIN32 */
114
+
115
+ /**
116
+ * \brief Registers an OpenGL buffer object
117
+ *
118
+ * Registers the buffer object specified by \p buffer for access by
119
+ * CUDA. A handle to the registered object is returned as \p
120
+ * pCudaResource. The register flags \p Flags specify the intended usage,
121
+ * as follows:
122
+ *
123
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this
124
+ * resource will be used. It is therefore assumed that this resource will be
125
+ * read from and written to by CUDA. This is the default value.
126
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA
127
+ * will not write to this resource.
128
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that
129
+ * CUDA will not read from this resource and will write over the
130
+ * entire contents of the resource, so none of the data previously
131
+ * stored in the resource will be preserved.
132
+ *
133
+ * \param pCudaResource - Pointer to the returned object handle
134
+ * \param buffer - name of buffer object to be registered
135
+ * \param Flags - Register flags
136
+ *
137
+ * \return
138
+ * ::CUDA_SUCCESS,
139
+ * ::CUDA_ERROR_INVALID_HANDLE,
140
+ * ::CUDA_ERROR_ALREADY_MAPPED,
141
+ * ::CUDA_ERROR_INVALID_CONTEXT,
142
+ * ::CUDA_ERROR_OPERATING_SYSTEM
143
+ * \notefnerr
144
+ *
145
+ * \sa
146
+ * ::cuGraphicsUnregisterResource,
147
+ * ::cuGraphicsMapResources,
148
+ * ::cuGraphicsResourceGetMappedPointer,
149
+ * ::cudaGraphicsGLRegisterBuffer
150
+ */
151
+ CUresult CUDAAPI cuGraphicsGLRegisterBuffer(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags);
152
+
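A hedged sketch of the typical buffer path: register once, then map, fetch the device pointer, and unmap around each use. cuGraphicsMapResources, cuGraphicsResourceGetMappedPointer, cuGraphicsUnmapResources and cuGraphicsUnregisterResource come from the graphics-interop section of cuda.h, and the GLuint is assumed to name a valid buffer object in the current GL context.

    #include <cudaGL.h>

    static CUresult map_gl_buffer(GLuint vbo, CUdeviceptr *dptr, size_t *bytes)
    {
        CUgraphicsResource res;
        CUresult rc = cuGraphicsGLRegisterBuffer(&res, vbo, CU_GRAPHICS_REGISTER_FLAGS_NONE);
        if (rc != CUDA_SUCCESS) return rc;

        rc = cuGraphicsMapResources(1, &res, 0);
        if (rc == CUDA_SUCCESS) {
            rc = cuGraphicsResourceGetMappedPointer(dptr, bytes, res);
            /* ... launch kernels that read/write through *dptr while mapped ... */
            (void)cuGraphicsUnmapResources(1, &res, 0);
        }
        (void)cuGraphicsUnregisterResource(res);
        return rc;
    }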
153
+ /**
154
+ * \brief Register an OpenGL texture or renderbuffer object
155
+ *
156
+ * Registers the texture or renderbuffer object specified by \p image for access by CUDA.
157
+ * A handle to the registered object is returned as \p pCudaResource.
158
+ *
159
+ * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
160
+ * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
161
+ * or ::GL_RENDERBUFFER.
162
+ *
163
+ * The register flags \p Flags specify the intended usage, as follows:
164
+ *
165
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this
166
+ * resource will be used. It is therefore assumed that this resource will be
167
+ * read from and written to by CUDA. This is the default value.
168
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA
169
+ * will not write to this resource.
170
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that
171
+ * CUDA will not read from this resource and will write over the
172
+ * entire contents of the resource, so none of the data previously
173
+ * stored in the resource will be preserved.
174
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will
175
+ * bind this resource to a surface reference.
176
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform
177
+ * texture gather operations on this resource.
178
+ *
179
+ * The following image formats are supported. For brevity's sake, the list is abbreviated.
180
+ * For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
181
+ * {GL_R8, GL_R16, GL_RG8, GL_RG16} :
182
+ * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
183
+ * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
184
+ * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
185
+ * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
186
+ *
187
+ * The following image classes are currently disallowed:
188
+ * - Textures with borders
189
+ * - Multisampled renderbuffers
190
+ *
191
+ * \param pCudaResource - Pointer to the returned object handle
192
+ * \param image - name of texture or renderbuffer object to be registered
193
+ * \param target - Identifies the type of object specified by \p image
194
+ * \param Flags - Register flags
195
+ *
196
+ * \return
197
+ * ::CUDA_SUCCESS,
198
+ * ::CUDA_ERROR_INVALID_HANDLE,
199
+ * ::CUDA_ERROR_ALREADY_MAPPED,
200
+ * ::CUDA_ERROR_INVALID_CONTEXT,
201
+ * ::CUDA_ERROR_OPERATING_SYSTEM
202
+ * \notefnerr
203
+ *
204
+ * \sa
205
+ * ::cuGraphicsUnregisterResource,
206
+ * ::cuGraphicsMapResources,
207
+ * ::cuGraphicsSubResourceGetMappedArray,
208
+ * ::cudaGraphicsGLRegisterImage
209
+ */
210
+ CUresult CUDAAPI cuGraphicsGLRegisterImage(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags);
211
+
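The image path is similar but yields a CUarray per sub-resource; a minimal sketch assuming a GL_TEXTURE_2D texture, with cuGraphicsSubResourceGetMappedArray coming from cuda.h. The array should only be used while the resource stays mapped.

    #include <cudaGL.h>

    static CUresult with_gl_texture(GLuint tex)
    {
        CUgraphicsResource res;
        CUresult rc = cuGraphicsGLRegisterImage(&res, tex, GL_TEXTURE_2D,
                                                CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST);
        if (rc != CUDA_SUCCESS) return rc;

        rc = cuGraphicsMapResources(1, &res, 0);
        if (rc == CUDA_SUCCESS) {
            CUarray level0;
            rc = cuGraphicsSubResourceGetMappedArray(&level0, res, 0, 0); /* face 0, mip 0 */
            /* ... bind level0 to a surface/texture object and launch kernels ... */
            (void)cuGraphicsUnmapResources(1, &res, 0);
        }
        (void)cuGraphicsUnregisterResource(res);
        return rc;
    }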
212
+ #ifdef _WIN32
213
+ /**
214
+ * \brief Gets the CUDA device associated with hGpu
215
+ *
216
+ * Returns in \p *pDevice the CUDA device associated with a \p hGpu, if
217
+ * applicable.
218
+ *
219
+ * \param pDevice - Device associated with hGpu
220
+ * \param hGpu - Handle to a GPU, as queried via ::WGL_NV_gpu_affinity()
221
+ *
222
+ * \return
223
+ * ::CUDA_SUCCESS,
224
+ * ::CUDA_ERROR_DEINITIALIZED,
225
+ * ::CUDA_ERROR_NOT_INITIALIZED,
226
+ * ::CUDA_ERROR_INVALID_CONTEXT,
227
+ * ::CUDA_ERROR_INVALID_VALUE
228
+ * \notefnerr
229
+ *
230
+ * \sa ::cuGLMapBufferObject,
231
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
232
+ * ::cuGLUnregisterBufferObject, ::cuGLUnmapBufferObjectAsync,
233
+ * ::cuGLSetBufferObjectMapFlags,
234
+ * ::cudaWGLGetDevice
235
+ */
236
+ CUresult CUDAAPI cuWGLGetDevice(CUdevice *pDevice, HGPUNV hGpu);
237
+ #endif /* _WIN32 */
238
+
239
+ /**
240
+ * CUDA devices corresponding to an OpenGL device
241
+ */
242
+ typedef enum CUGLDeviceList_enum {
243
+ CU_GL_DEVICE_LIST_ALL = 0x01, /**< The CUDA devices for all GPUs used by the current OpenGL context */
244
+ CU_GL_DEVICE_LIST_CURRENT_FRAME = 0x02, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
245
+ CU_GL_DEVICE_LIST_NEXT_FRAME = 0x03, /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
246
+ } CUGLDeviceList;
247
+
248
+ /**
249
+ * \brief Gets the CUDA devices associated with the current OpenGL context
250
+ *
251
+ * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
252
+ * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
253
+ * at most cudaDeviceCount of the CUDA-compatible devices corresponding to
254
+ * the current OpenGL context. If any of the GPUs being used by the current OpenGL
255
+ * context are not CUDA capable, the call will return ::CUDA_ERROR_NO_DEVICE.
256
+ *
257
+ * The \p deviceList argument may be any of the following:
258
+ * - ::CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context.
259
+ * - ::CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to
260
+ * render the current frame (in SLI).
261
+ * - ::CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to
262
+ * render the next frame (in SLI). Note that this is a prediction; it is not guaranteed that
263
+ * this is correct in all cases.
264
+ *
265
+ * \param pCudaDeviceCount - Returned number of CUDA devices.
266
+ * \param pCudaDevices - Returned CUDA devices.
267
+ * \param cudaDeviceCount - The size of the output device array pCudaDevices.
268
+ * \param deviceList - The set of devices to return.
269
+ *
270
+ * \return
271
+ * ::CUDA_SUCCESS,
272
+ * ::CUDA_ERROR_NO_DEVICE,
273
+ * ::CUDA_ERROR_INVALID_VALUE,
274
+ * ::CUDA_ERROR_INVALID_CONTEXT,
275
+ * ::CUDA_ERROR_INVALID_GRAPHICS_CONTEXT,
276
+ * ::CUDA_ERROR_OPERATING_SYSTEM
277
+ *
278
+ * \notefnerr
279
+ *
280
+ * \sa
281
+ * ::cuWGLGetDevice,
282
+ * ::cudaGLGetDevices
283
+ */
284
+ CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
285
+
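/*
 * A minimal sketch, assuming an OpenGL context is bound to the calling
 * thread: enumerate the CUDA devices that drive that context. Error
 * checking is omitted for brevity.
 */
static void exampleEnumerateGLDevices(void)
{
    CUdevice devices[8];
    unsigned int count = 0;

    cuGLGetDevices(&count, devices, 8, CU_GL_DEVICE_LIST_ALL);
    /* At most 8 devices are returned here; `count` reports how many CUDA-capable
     * GPUs back the GL context. Typically devices[0] is then used to create the
     * interop CUDA context. */
}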
286
+ /**
287
+ * \defgroup CUDA_GL_DEPRECATED OpenGL Interoperability [DEPRECATED]
288
+ *
289
+ * ___MANBRIEF___ deprecated OpenGL interoperability functions of the low-level
290
+ * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
291
+ *
292
+ * This section describes deprecated OpenGL interoperability functionality.
293
+ *
294
+ * @{
295
+ */
296
+
297
+ /** Flags to map or unmap a resource */
298
+ typedef enum CUGLmap_flags_enum {
299
+ CU_GL_MAP_RESOURCE_FLAGS_NONE = 0x00,
300
+ CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01,
301
+ CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02,
302
+ } CUGLmap_flags;
303
+
304
+ /**
305
+ * \brief Create a CUDA context for interoperability with OpenGL
306
+ *
307
+ * \deprecated This function is deprecated as of CUDA 5.0.
308
+ *
309
+ * This function is deprecated and should no longer be used. It is
310
+ * no longer necessary to associate a CUDA context with an OpenGL
311
+ * context in order to achieve maximum interoperability performance.
312
+ *
313
+ * \param pCtx - Returned CUDA context
314
+ * \param Flags - Options for CUDA context creation
315
+ * \param device - Device on which to create the context
316
+ *
317
+ * \return
318
+ * ::CUDA_SUCCESS,
319
+ * ::CUDA_ERROR_DEINITIALIZED,
320
+ * ::CUDA_ERROR_NOT_INITIALIZED,
321
+ * ::CUDA_ERROR_INVALID_CONTEXT,
322
+ * ::CUDA_ERROR_INVALID_VALUE,
323
+ * ::CUDA_ERROR_OUT_OF_MEMORY
324
+ * \notefnerr
325
+ *
326
+ * \sa ::cuCtxCreate, ::cuGLInit, ::cuGLMapBufferObject,
327
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
328
+ * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
329
+ * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
330
+ * ::cuWGLGetDevice
331
+ */
332
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device);
333
+
334
+ /**
335
+ * \brief Initializes OpenGL interoperability
336
+ *
337
+ * \deprecated This function is deprecated as of CUDA 3.0.
338
+ *
339
+ * Initializes OpenGL interoperability. This function is deprecated
340
+ * and calling it is no longer required. It may fail if the needed
341
+ * OpenGL driver facilities are not available.
342
+ *
343
+ * \return
344
+ * ::CUDA_SUCCESS,
345
+ * ::CUDA_ERROR_DEINITIALIZED,
346
+ * ::CUDA_ERROR_NOT_INITIALIZED,
347
+ * ::CUDA_ERROR_INVALID_CONTEXT,
348
+ * ::CUDA_ERROR_UNKNOWN
349
+ * \notefnerr
350
+ *
351
+ * \sa ::cuGLMapBufferObject,
352
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
353
+ * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
354
+ * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
355
+ * ::cuWGLGetDevice
356
+ */
357
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLInit(void);
358
+
359
+ /**
360
+ * \brief Registers an OpenGL buffer object
361
+ *
362
+ * \deprecated This function is deprecated as of CUDA 3.0.
363
+ *
364
+ * Registers the buffer object specified by \p buffer for access by
365
+ * CUDA. This function must be called before CUDA can map the buffer
366
+ * object. There must be a valid OpenGL context bound to the current
367
+ * thread when this function is called, and the buffer name is
368
+ * resolved by that context.
369
+ *
370
+ * \param buffer - The name of the buffer object to register.
371
+ *
372
+ * \return
373
+ * ::CUDA_SUCCESS,
374
+ * ::CUDA_ERROR_DEINITIALIZED,
375
+ * ::CUDA_ERROR_NOT_INITIALIZED,
376
+ * ::CUDA_ERROR_INVALID_CONTEXT,
377
+ * ::CUDA_ERROR_ALREADY_MAPPED
378
+ * \notefnerr
379
+ *
380
+ * \sa ::cuGraphicsGLRegisterBuffer
381
+ */
382
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLRegisterBufferObject(GLuint buffer);
383
+
384
+ /**
385
+ * \brief Maps an OpenGL buffer object
386
+ *
387
+ * \deprecated This function is deprecated as of CUDA 3.0.
388
+ *
389
+ * Maps the buffer object specified by \p buffer into the address space of the
390
+ * current CUDA context and returns in \p *dptr and \p *size the base pointer
391
+ * and size of the resulting mapping.
392
+ *
393
+ * There must be a valid OpenGL context bound to the current thread
394
+ * when this function is called. This must be the same context, or a
395
+ * member of the same shareGroup, as the context that was bound when
396
+ * the buffer was registered.
397
+ *
398
+ * All streams in the current CUDA context are synchronized with the
399
+ * current GL context.
400
+ *
401
+ * \param dptr - Returned mapped base pointer
402
+ * \param size - Returned size of mapping
403
+ * \param buffer - The name of the buffer object to map
404
+ *
405
+ * \return
406
+ * ::CUDA_SUCCESS,
407
+ * ::CUDA_ERROR_DEINITIALIZED,
408
+ * ::CUDA_ERROR_NOT_INITIALIZED,
409
+ * ::CUDA_ERROR_INVALID_CONTEXT,
410
+ * ::CUDA_ERROR_INVALID_VALUE,
411
+ * ::CUDA_ERROR_MAP_FAILED
412
+ * \notefnerr
413
+ *
414
+ * \sa ::cuGraphicsMapResources
415
+ */
416
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr *dptr, size_t *size, GLuint buffer);
417
+
418
+ /**
419
+ * \brief Unmaps an OpenGL buffer object
420
+ *
421
+ * \deprecated This function is deprecated as of CUDA 3.0.
422
+ *
423
+ * Unmaps the buffer object specified by \p buffer for access by CUDA.
424
+ *
425
+ * There must be a valid OpenGL context bound to the current thread
426
+ * when this function is called. This must be the same context, or a
427
+ * member of the same shareGroup, as the context that was bound when
428
+ * the buffer was registered.
429
+ *
430
+ * All streams in the current CUDA context are synchronized with the
431
+ * current GL context.
432
+ *
433
+ * \param buffer - Buffer object to unmap
434
+ *
435
+ * \return
436
+ * ::CUDA_SUCCESS,
437
+ * ::CUDA_ERROR_DEINITIALIZED,
438
+ * ::CUDA_ERROR_NOT_INITIALIZED,
439
+ * ::CUDA_ERROR_INVALID_CONTEXT,
440
+ * ::CUDA_ERROR_INVALID_VALUE
441
+ * \notefnerr
442
+ *
443
+ * \sa ::cuGraphicsUnmapResources
444
+ */
445
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObject(GLuint buffer);
446
+
447
+ /**
448
+ * \brief Unregister an OpenGL buffer object
449
+ *
450
+ * \deprecated This function is deprecated as of CUDA 3.0.
451
+ *
452
+ * Unregisters the buffer object specified by \p buffer. This
453
+ * releases any resources associated with the registered buffer.
454
+ * After this call, the buffer may no longer be mapped for access by
455
+ * CUDA.
456
+ *
457
+ * There must be a valid OpenGL context bound to the current thread
458
+ * when this function is called. This must be the same context, or a
459
+ * member of the same shareGroup, as the context that was bound when
460
+ * the buffer was registered.
461
+ *
462
+ * \param buffer - Name of the buffer object to unregister
463
+ *
464
+ * \return
465
+ * ::CUDA_SUCCESS,
466
+ * ::CUDA_ERROR_DEINITIALIZED,
467
+ * ::CUDA_ERROR_NOT_INITIALIZED,
468
+ * ::CUDA_ERROR_INVALID_CONTEXT,
469
+ * ::CUDA_ERROR_INVALID_VALUE
470
+ * \notefnerr
471
+ *
472
+ * \sa ::cuGraphicsUnregisterResource
473
+ */
474
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnregisterBufferObject(GLuint buffer);
475
+
476
+ /**
477
+ * \brief Set the map flags for an OpenGL buffer object
478
+ *
479
+ * \deprecated This function is deprecated as of CUDA 3.0.
480
+ *
481
+ * Sets the map flags for the buffer object specified by \p buffer.
482
+ *
483
+ * Changes to \p Flags will take effect the next time \p buffer is mapped.
484
+ * The \p Flags argument may be any of the following:
485
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
486
+ * resource will be used. It is therefore assumed that this resource will be
487
+ * read from and written to by CUDA kernels. This is the default value.
488
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA kernels which
489
+ * access this resource will not write to this resource.
490
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA kernels
491
+ * which access this resource will not read from this resource and will
492
+ * write over the entire contents of the resource, so none of the data
493
+ * previously stored in the resource will be preserved.
494
+ *
495
+ * If \p buffer has not been registered for use with CUDA, then
496
+ * ::CUDA_ERROR_INVALID_HANDLE is returned. If \p buffer is presently
497
+ * mapped for access by CUDA, then ::CUDA_ERROR_ALREADY_MAPPED is returned.
498
+ *
499
+ * There must be a valid OpenGL context bound to the current thread
500
+ * when this function is called. This must be the same context, or a
501
+ * member of the same shareGroup, as the context that was bound when
502
+ * the buffer was registered.
503
+ *
504
+ * \param buffer - Buffer object whose map flags are to be set
505
+ * \param Flags - Map flags
506
+ *
507
+ * \return
508
+ * ::CUDA_SUCCESS,
509
+ * ::CUDA_ERROR_NOT_INITIALIZED,
510
+ * ::CUDA_ERROR_INVALID_HANDLE,
511
+ * ::CUDA_ERROR_ALREADY_MAPPED,
512
+ * ::CUDA_ERROR_INVALID_CONTEXT,
513
+ * \notefnerr
514
+ *
515
+ * \sa ::cuGraphicsResourceSetMapFlags
516
+ */
517
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLSetBufferObjectMapFlags(GLuint buffer, unsigned int Flags);
518
+
519
+ /**
520
+ * \brief Maps an OpenGL buffer object
521
+ *
522
+ * \deprecated This function is deprecated as of CUDA 3.0.
523
+ *
524
+ * Maps the buffer object specified by \p buffer into the address space of the
525
+ * current CUDA context and returns in \p *dptr and \p *size the base pointer
526
+ * and size of the resulting mapping.
527
+ *
528
+ * There must be a valid OpenGL context bound to the current thread
529
+ * when this function is called. This must be the same context, or a
530
+ * member of the same shareGroup, as the context that was bound when
531
+ * the buffer was registered.
532
+ *
533
+ * Stream \p hStream in the current CUDA context is synchronized with
534
+ * the current GL context.
535
+ *
536
+ * \param dptr - Returned mapped base pointer
537
+ * \param size - Returned size of mapping
538
+ * \param buffer - The name of the buffer object to map
539
+ * \param hStream - Stream to synchronize
540
+ *
541
+ * \return
542
+ * ::CUDA_SUCCESS,
543
+ * ::CUDA_ERROR_DEINITIALIZED,
544
+ * ::CUDA_ERROR_NOT_INITIALIZED,
545
+ * ::CUDA_ERROR_INVALID_CONTEXT,
546
+ * ::CUDA_ERROR_INVALID_VALUE,
547
+ * ::CUDA_ERROR_MAP_FAILED
548
+ * \notefnerr
549
+ *
550
+ * \sa ::cuGraphicsMapResources
551
+ */
552
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream);
553
+
554
+ /**
555
+ * \brief Unmaps an OpenGL buffer object
556
+ *
557
+ * \deprecated This function is deprecated as of CUDA 3.0.
558
+ *
559
+ * Unmaps the buffer object specified by \p buffer for access by CUDA.
560
+ *
561
+ * There must be a valid OpenGL context bound to the current thread
562
+ * when this function is called. This must be the same context, or a
563
+ * member of the same shareGroup, as the context that was bound when
564
+ * the buffer was registered.
565
+ *
566
+ * Stream \p hStream in the current CUDA context is synchronized with
567
+ * the current GL context.
568
+ *
569
+ * \param buffer - Name of the buffer object to unmap
570
+ * \param hStream - Stream to synchronize
571
+ *
572
+ * \return
573
+ * ::CUDA_SUCCESS,
574
+ * ::CUDA_ERROR_DEINITIALIZED,
575
+ * ::CUDA_ERROR_NOT_INITIALIZED,
576
+ * ::CUDA_ERROR_INVALID_CONTEXT,
577
+ * ::CUDA_ERROR_INVALID_VALUE
578
+ * \notefnerr
579
+ *
580
+ * \sa ::cuGraphicsUnmapResources
581
+ */
582
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObjectAsync(GLuint buffer, CUstream hStream);
583
+
584
+ /** @} */ /* END CUDA_GL_DEPRECATED */
585
+ /** @} */ /* END CUDA_GL */
586
+
587
+
588
+ #if defined(__CUDA_API_VERSION_INTERNAL)
589
+ #undef cuGLCtxCreate
590
+ #undef cuGLMapBufferObject
591
+ #undef cuGLMapBufferObjectAsync
592
+ #undef cuGLGetDevices
593
+
594
+ CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
595
+ CUresult CUDAAPI cuGLMapBufferObject_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer);
596
+ CUresult CUDAAPI cuGLMapBufferObjectAsync_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream);
597
+ CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device);
598
+ CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer);
599
+ CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream);
600
+ #endif /* __CUDA_API_VERSION_INTERNAL */
601
+
602
+ #ifdef __cplusplus
603
+ };
604
+ #endif
605
+
606
+ #undef __CUDA_DEPRECATED
607
+
608
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h ADDED
@@ -0,0 +1,123 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAGLTYPEDEFS_H
51
+ #define CUDAGLTYPEDEFS_H
52
+
53
+ // Dependent includes for cudagl.h
54
+ #include <GL/gl.h>
55
+
56
+ #include <cudaGL.h>
57
+
58
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
59
+ #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds
60
+ #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz
61
+ #else
62
+ #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version
63
+ #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version
64
+ #endif
65
+
66
+ #ifdef __cplusplus
67
+ extern "C" {
68
+ #endif // __cplusplus
69
+
70
+ /*
71
+ * Macros for the latest version for each driver function in cudaGL.h
72
+ */
73
+ #define PFN_cuGraphicsGLRegisterBuffer PFN_cuGraphicsGLRegisterBuffer_v3000
74
+ #define PFN_cuGraphicsGLRegisterImage PFN_cuGraphicsGLRegisterImage_v3000
75
+ #define PFN_cuWGLGetDevice PFN_cuWGLGetDevice_v2020
76
+ #define PFN_cuGLGetDevices PFN_cuGLGetDevices_v6050
77
+ #define PFN_cuGLCtxCreate PFN_cuGLCtxCreate_v3020
78
+ #define PFN_cuGLInit PFN_cuGLInit_v2000
79
+ #define PFN_cuGLRegisterBufferObject PFN_cuGLRegisterBufferObject_v2000
80
+ #define PFN_cuGLMapBufferObject __API_TYPEDEF_PTDS(PFN_cuGLMapBufferObject, 3020, 7000)
81
+ #define PFN_cuGLUnmapBufferObject PFN_cuGLUnmapBufferObject_v2000
82
+ #define PFN_cuGLUnregisterBufferObject PFN_cuGLUnregisterBufferObject_v2000
83
+ #define PFN_cuGLSetBufferObjectMapFlags PFN_cuGLSetBufferObjectMapFlags_v2030
84
+ #define PFN_cuGLMapBufferObjectAsync __API_TYPEDEF_PTSZ(PFN_cuGLMapBufferObjectAsync, 3020, 7000)
85
+ #define PFN_cuGLUnmapBufferObjectAsync PFN_cuGLUnmapBufferObjectAsync_v2030
86
+
87
+
88
+ /**
89
+ * Type definitions for functions defined in cudaGL.h
90
+ */
91
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterBuffer_v3000)(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags);
92
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterImage_v3000)(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags);
93
+ #ifdef _WIN32
94
+ typedef CUresult (CUDAAPI *PFN_cuWGLGetDevice_v2020)(CUdevice_v1 *pDevice, HGPUNV hGpu);
95
+ #endif
96
+ typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v6050)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
97
+ typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v3020)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device);
98
+ typedef CUresult (CUDAAPI *PFN_cuGLInit_v2000)(void);
99
+ typedef CUresult (CUDAAPI *PFN_cuGLRegisterBufferObject_v2000)(GLuint buffer);
100
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v7000_ptds)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer);
101
+ typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObject_v2000)(GLuint buffer);
102
+ typedef CUresult (CUDAAPI *PFN_cuGLUnregisterBufferObject_v2000)(GLuint buffer);
103
+ typedef CUresult (CUDAAPI *PFN_cuGLSetBufferObjectMapFlags_v2030)(GLuint buffer, unsigned int Flags);
104
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v7000_ptsz)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream);
105
+ typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObjectAsync_v2030)(GLuint buffer, CUstream hStream);
106
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer);
107
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream);
108
+
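/*
 * A minimal sketch of how these PFN typedefs are typically used: resolving a
 * driver entry point at run time and calling it through the typedef. The
 * dlopen()/dlsym() route shown here is a Linux-specific assumption; the
 * cuGetProcAddress() driver API can be used instead. Error checking is
 * omitted for brevity.
 */
#include <dlfcn.h>   /* dlopen, dlsym (POSIX) */

static PFN_cuGraphicsGLRegisterBuffer exampleResolveRegisterBuffer(void)
{
    void *lib = dlopen("libcuda.so.1", RTLD_NOW);
    return lib ? (PFN_cuGraphicsGLRegisterBuffer)dlsym(lib, "cuGraphicsGLRegisterBuffer")
               : NULL;
}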
109
+ /*
110
+ * Type definitions for older versioned functions in cudaGL.h
111
+ */
112
+ #if defined(__CUDA_API_VERSION_INTERNAL)
113
+ typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v4010)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
114
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v2000)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer);
115
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v2030)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream);
116
+ typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v2000)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device);
117
+ #endif
118
+
119
+ #ifdef __cplusplus
120
+ }
121
+ #endif // __cplusplus
122
+
123
+ #endif // file guard
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h ADDED
@@ -0,0 +1,78 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAPROFILERTYPEDEFS_H
51
+ #define CUDAPROFILERTYPEDEFS_H
52
+
53
+ #include <cudaProfiler.h>
54
+
55
+ #ifdef __cplusplus
56
+ extern "C" {
57
+ #endif // __cplusplus
58
+
59
+ /*
60
+ * Macros for the latest version for each driver function in cudaProfiler.h
61
+ */
62
+ #define PFN_cuProfilerInitialize PFN_cuProfilerInitialize_v4000
63
+ #define PFN_cuProfilerStart PFN_cuProfilerStart_v4000
64
+ #define PFN_cuProfilerStop PFN_cuProfilerStop_v4000
65
+
66
+
67
+ /**
68
+ * Type definitions for functions defined in cudaProfiler.h
69
+ */
70
+ typedef CUresult (CUDAAPI *PFN_cuProfilerInitialize_v4000)(const char *configFile, const char *outputFile, CUoutput_mode outputMode);
71
+ typedef CUresult (CUDAAPI *PFN_cuProfilerStart_v4000)(void);
72
+ typedef CUresult (CUDAAPI *PFN_cuProfilerStop_v4000)(void);
73
+
74
+ #ifdef __cplusplus
75
+ }
76
+ #endif // __cplusplus
77
+
78
+ #endif // file guard
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h ADDED
@@ -0,0 +1,282 @@
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAVDPAU_H
51
+ #define CUDAVDPAU_H
52
+
53
+ #ifdef CUDA_FORCE_API_VERSION
54
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
55
+ #endif
56
+
57
+ #define cuVDPAUCtxCreate cuVDPAUCtxCreate_v2
58
+
59
+ #ifdef __cplusplus
60
+ extern "C" {
61
+ #endif
62
+
63
+ /**
64
+ * \defgroup CUDA_VDPAU VDPAU Interoperability
65
+ * \ingroup CUDA_DRIVER
66
+ *
67
+ * ___MANBRIEF___ VDPAU interoperability functions of the low-level CUDA driver
68
+ * API (___CURRENT_FILE___) ___ENDMANBRIEF___
69
+ *
70
+ * This section describes the VDPAU interoperability functions of the
71
+ * low-level CUDA driver application programming interface.
72
+ *
73
+ * @{
74
+ */
75
+
76
+ /**
77
+ * \brief Gets the CUDA device associated with a VDPAU device
78
+ *
79
+ * Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
80
+ * applicable.
81
+ *
82
+ * \param pDevice - Device associated with vdpDevice
83
+ * \param vdpDevice - A VdpDevice handle
84
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
85
+ *
86
+ * \return
87
+ * ::CUDA_SUCCESS,
88
+ * ::CUDA_ERROR_DEINITIALIZED,
89
+ * ::CUDA_ERROR_NOT_INITIALIZED,
90
+ * ::CUDA_ERROR_INVALID_CONTEXT,
91
+ * ::CUDA_ERROR_INVALID_VALUE
92
+ * \notefnerr
93
+ *
94
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
95
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
96
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
97
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
98
+ * ::cudaVDPAUGetDevice
99
+ */
100
+ CUresult CUDAAPI cuVDPAUGetDevice(CUdevice *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
101
+
102
+ /**
103
+ * \brief Create a CUDA context for interoperability with VDPAU
104
+ *
105
+ * Creates a new CUDA context, initializes VDPAU interoperability, and
106
+ * associates the CUDA context with the calling thread. It must be called
107
+ * before performing any other VDPAU interoperability operations. It may fail
108
+ * if the needed VDPAU driver facilities are not available. For usage of the
109
+ * \p flags parameter, see ::cuCtxCreate().
110
+ *
111
+ * \param pCtx - Returned CUDA context
112
+ * \param flags - Options for CUDA context creation
113
+ * \param device - Device on which to create the context
114
+ * \param vdpDevice - The VdpDevice to interop with
115
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
116
+ *
117
+ * \return
118
+ * ::CUDA_SUCCESS,
119
+ * ::CUDA_ERROR_DEINITIALIZED,
120
+ * ::CUDA_ERROR_NOT_INITIALIZED,
121
+ * ::CUDA_ERROR_INVALID_CONTEXT,
122
+ * ::CUDA_ERROR_INVALID_VALUE,
123
+ * ::CUDA_ERROR_OUT_OF_MEMORY
124
+ * \notefnerr
125
+ *
126
+ * \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
127
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
128
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
129
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
130
+ * ::cuVDPAUGetDevice
131
+ */
132
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
133
+
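/*
 * A minimal sketch, assuming `vdpDevice` and `vdpGetProcAddress` are the
 * hypothetical handles returned by VDPAU device creation (for example
 * vdp_device_create_x11). Error checking is omitted for brevity.
 */
static CUcontext exampleCreateVdpauContext(VdpDevice vdpDevice,
                                           VdpGetProcAddress *vdpGetProcAddress)
{
    CUdevice dev;
    CUcontext ctx = NULL;

    cuInit(0);
    cuVDPAUGetDevice(&dev, vdpDevice, vdpGetProcAddress);  /* GPU backing the VDPAU device */
    cuVDPAUCtxCreate(&ctx, CU_CTX_SCHED_AUTO, dev, vdpDevice, vdpGetProcAddress);
    return ctx;   /* the new context is current on the calling thread */
}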
134
+ /**
135
+ * \brief Registers a VDPAU VdpVideoSurface object
136
+ *
137
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by
138
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
139
+ * The surface's intended usage is specified using \p flags, as follows:
140
+ *
141
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
142
+ * resource will be used. It is therefore assumed that this resource will be
143
+ * read from and written to by CUDA. This is the default value.
144
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
145
+ * will not write to this resource.
146
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
147
+ * CUDA will not read from this resource and will write over the
148
+ * entire contents of the resource, so none of the data previously
149
+ * stored in the resource will be preserved.
150
+ *
151
+ * The VdpVideoSurface is presented as an array of subresources that may be
152
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
153
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
154
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
155
+ *
156
+ * \htmlonly
157
+ * <table>
158
+ * <tr><th>VdpChromaType </th><th>arrayIndex</th><th>Size </th><th>Format</th><th>Content </th></tr>
159
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_420</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
160
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
161
+ * <tr> <td>2 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Top-field chroma </td></tr>
162
+ * <tr> <td>3 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
163
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_422</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
164
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
165
+ * <tr> <td>2 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Top-field chroma </td></tr>
166
+ * <tr> <td>3 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
167
+ * </table>
168
+ * \endhtmlonly
169
+ *
170
+ * \latexonly
171
+ * \begin{tabular}{|l|l|l|l|l|}
172
+ * \hline
173
+ * VdpChromaType & arrayIndex & Size & Format & Content \\
174
+ * \hline
175
+ * VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\
176
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
177
+ * & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\
178
+ * & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\
179
+ * \hline
180
+ * VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\
181
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
182
+ * & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\
183
+ * & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\
184
+ * \hline
185
+ * \end{tabular}
186
+ * \endlatexonly
187
+ *
188
+ * \param pCudaResource - Pointer to the returned object handle
189
+ * \param vdpSurface - The VdpVideoSurface to be registered
190
+ * \param flags - Map flags
191
+ *
192
+ * \return
193
+ * ::CUDA_SUCCESS,
194
+ * ::CUDA_ERROR_INVALID_HANDLE,
195
+ * ::CUDA_ERROR_ALREADY_MAPPED,
196
+ * ::CUDA_ERROR_INVALID_CONTEXT,
197
+ * \notefnerr
198
+ *
199
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
200
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
201
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
202
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
203
+ * ::cuVDPAUGetDevice,
204
+ * ::cudaGraphicsVDPAURegisterVideoSurface
205
+ */
206
+ CUresult CUDAAPI cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
207
+
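/*
 * A minimal sketch, assuming `surf` is a hypothetical VDP_CHROMA_TYPE_420
 * video surface: register it read-only, map it, and fetch the four planes
 * listed in the table above. Error checking is omitted for brevity.
 */
static void exampleMapVideoSurface(VdpVideoSurface surf)
{
    CUgraphicsResource res;
    CUarray planes[4];
    unsigned int i;

    cuGraphicsVDPAURegisterVideoSurface(&res, surf, CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);
    cuGraphicsMapResources(1, &res, 0);
    for (i = 0; i < 4; ++i) {
        cuGraphicsSubResourceGetMappedArray(&planes[i], res, i, 0);  /* mipLevel must be 0 */
    }
    /* ... read the luma/chroma planes from CUDA ... */
    cuGraphicsUnmapResources(1, &res, 0);
    cuGraphicsUnregisterResource(res);
}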
208
+ /**
209
+ * \brief Registers a VDPAU VdpOutputSurface object
210
+ *
211
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by
212
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
213
+ * The surface's intended usage is specified using \p flags, as follows:
214
+ *
215
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
216
+ * resource will be used. It is therefore assumed that this resource will be
217
+ * read from and written to by CUDA. This is the default value.
218
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
219
+ * will not write to this resource.
220
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
221
+ * CUDA will not read from this resource and will write over the
222
+ * entire contents of the resource, so none of the data previously
223
+ * stored in the resource will be preserved.
224
+ *
225
+ * The VdpOutputSurface is presented as an array of subresources that may be
226
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
227
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
228
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
229
+ *
230
+ * \htmlonly
231
+ * <table>
232
+ * <tr><th>VdpRGBAFormat </th><th>arrayIndex</th><th>Size </th><th>Format </th><th>Content </th></tr>
233
+ * <tr><td>VDP_RGBA_FORMAT_B8G8R8A8 </td><td>0 </td><td>w x h</td><td>ARGB8 </td><td>Entire surface</td></tr>
234
+ * <tr><td>VDP_RGBA_FORMAT_R10G10B10A2</td><td>0 </td><td>w x h</td><td>A2BGR10</td><td>Entire surface</td></tr>
235
+ * </table>
236
+ * \endhtmlonly
237
+ *
238
+ * \latexonly
239
+ * \begin{tabular}{|l|l|l|l|l|}
240
+ * \hline
241
+ * VdpRGBAFormat & arrayIndex & Size & Format & Content \\
242
+ * \hline
243
+ * VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\
244
+ * VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\
245
+ * \hline
246
+ * \end{tabular}
247
+ * \endlatexonly
248
+ *
249
+ * \param pCudaResource - Pointer to the returned object handle
250
+ * \param vdpSurface - The VdpOutputSurface to be registered
251
+ * \param flags - Map flags
252
+ *
253
+ * \return
254
+ * ::CUDA_SUCCESS,
255
+ * ::CUDA_ERROR_INVALID_HANDLE,
256
+ * ::CUDA_ERROR_ALREADY_MAPPED,
257
+ * ::CUDA_ERROR_INVALID_CONTEXT,
258
+ * \notefnerr
259
+ *
260
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
261
+ * ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource,
262
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
263
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
264
+ * ::cuVDPAUGetDevice,
265
+ * ::cudaGraphicsVDPAURegisterOutputSurface
266
+ */
267
+ CUresult CUDAAPI cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
268
+
269
+ /** @} */ /* END CUDA_VDPAU */
270
+
271
+
272
+ #if defined(__CUDA_API_VERSION_INTERNAL)
273
+ #undef cuVDPAUCtxCreate
274
+
275
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
276
+ #endif /* __CUDA_API_VERSION_INTERNAL */
277
+
278
+ #ifdef __cplusplus
279
+ };
280
+ #endif
281
+
282
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h ADDED
@@ -0,0 +1,90 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAVDPAUTYPEDEFS_H
51
+ #define CUDAVDPAUTYPEDEFS_H
52
+
53
+ // Dependent includes for cudavdpau.h
54
+ #include <vdpau/vdpau.h>
55
+
56
+ #include <cudaVDPAU.h>
57
+
58
+ #ifdef __cplusplus
59
+ extern "C" {
60
+ #endif // __cplusplus
61
+
62
+ /*
63
+ * Macros for the latest version for each driver function in cudaVDPAU.h
64
+ */
65
+ #define PFN_cuVDPAUGetDevice PFN_cuVDPAUGetDevice_v3010
66
+ #define PFN_cuVDPAUCtxCreate PFN_cuVDPAUCtxCreate_v3020
67
+ #define PFN_cuGraphicsVDPAURegisterVideoSurface PFN_cuGraphicsVDPAURegisterVideoSurface_v3010
68
+ #define PFN_cuGraphicsVDPAURegisterOutputSurface PFN_cuGraphicsVDPAURegisterOutputSurface_v3010
69
+
70
+
71
+ /**
72
+ * Type definitions for functions defined in cudaVDPAU.h
73
+ */
74
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUGetDevice_v3010)(CUdevice_v1 *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
75
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3020)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
76
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterVideoSurface_v3010)(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
77
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterOutputSurface_v3010)(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
78
+
79
+ /*
80
+ * Type definitions for older versioned functions in cudaVDPAU.h
81
+ */
82
+ #if defined(__CUDA_API_VERSION_INTERNAL)
83
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3010)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
84
+ #endif
85
+
86
+ #ifdef __cplusplus
87
+ }
88
+ #endif // __cplusplus
89
+
90
+ #endif // file guard
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h ADDED
@@ -0,0 +1,280 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_H_
51
+ # define _CUDA_AWBARRIER_H_
52
+
53
+ # include "cuda_awbarrier_primitives.h"
54
+
55
+ # if !defined(_CUDA_AWBARRIER_SM_TARGET)
56
+ # error This file requires compute capability 7.0 or greater.
57
+ # endif
58
+
59
+ # if !defined(_CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER)
60
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
61
+ -std=c++11 compiler option.
62
+ # endif
63
+
64
+ _CUDA_AWBARRIER_BEGIN_NAMESPACE
65
+
66
+ class awbarrier {
67
+ public:
68
+ class arrival_token {
69
+ public:
70
+ arrival_token() = default;
71
+ ~arrival_token() = default;
72
+ _CUDA_AWBARRIER_QUALIFIER uint32_t pending_count() const;
73
+ private:
74
+ _CUDA_AWBARRIER_QUALIFIER arrival_token(uint64_t token);
75
+ uint64_t token;
76
+ friend awbarrier;
77
+ };
78
+ awbarrier() = default;
79
+ awbarrier(const awbarrier&) = delete;
80
+ awbarrier& operator=(const awbarrier&) = delete;
81
+ ~awbarrier() = default;
82
+
83
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive();
84
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive_and_drop();
85
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait(arrival_token token, uint32_t hint_cycles);
86
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait_parity(bool phase, uint32_t hint_cycles);
87
+ _CUDA_AWBARRIER_QUALIFIER void wait(arrival_token token);
88
+ _CUDA_AWBARRIER_QUALIFIER void arrive_and_wait();
89
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait(arrival_token token, uint32_t maxSleepNanosec);
90
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait_parity(bool phase, uint32_t maxSleepNanosec);
91
+ _CUDA_AWBARRIER_STATIC_QUALIFIER __host__ constexpr uint32_t max();
92
+
93
+ private:
94
+ uint64_t barrier;
95
+ friend _CUDA_AWBARRIER_QUALIFIER void init(awbarrier* barrier, uint32_t expected_count);
96
+ friend _CUDA_AWBARRIER_QUALIFIER void inval(awbarrier* barrier);
97
+ friend class pipeline;
98
+ };
99
+
100
+ _CUDA_AWBARRIER_QUALIFIER
101
+ uint32_t awbarrier::arrival_token::pending_count() const
102
+ {
103
+ const uint32_t pending_count = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(this->token);
104
+ #if (__CUDA_ARCH__ >= 900)
105
+ return pending_count;
106
+ #else
107
+ return (pending_count >> 15);
108
+ #endif
109
+ }
110
+
111
+ _CUDA_AWBARRIER_QUALIFIER
112
+ awbarrier::arrival_token::arrival_token(uint64_t token)
113
+ : token(token)
114
+ {
115
+ }
116
+
117
+ _CUDA_AWBARRIER_QUALIFIER
118
+ void init(awbarrier* barrier, uint32_t expected_count)
119
+ {
120
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
121
+ _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count <= _CUDA_AWBARRIER_MAX_COUNT);
122
+
123
+ #if (__CUDA_ARCH__ >= 900)
124
+ const uint32_t init_count = expected_count;
125
+ #else
126
+ const uint32_t init_count = (expected_count << 15) + expected_count;
127
+ #endif
128
+
129
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(&barrier->barrier, init_count);
130
+ }
131
+
132
+ _CUDA_AWBARRIER_QUALIFIER
133
+ void inval(awbarrier* barrier)
134
+ {
135
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
136
+
137
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(&barrier->barrier);
138
+ }
139
+
140
+ _CUDA_AWBARRIER_QUALIFIER
141
+ awbarrier::arrival_token awbarrier::arrive()
142
+ {
143
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
144
+
145
+ #if (__CUDA_ARCH__ < 900)
146
+ const uint32_t arrive_count = 1 << 15;
147
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<false>(&this->barrier, arrive_count);
148
+ (void)
149
+ #else
150
+ const uint64_t token =
151
+ #endif
152
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(&this->barrier);
153
+
154
+ return arrival_token(token);
155
+ }
156
+
157
+ _CUDA_AWBARRIER_QUALIFIER
158
+ awbarrier::arrival_token awbarrier::arrive_and_drop()
159
+ {
160
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
161
+
162
+ #if (__CUDA_ARCH__ < 900)
163
+ const uint32_t arrive_count = 1 << 15;
164
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<true>(&this->barrier, arrive_count);
165
+ (void)
166
+ #else
167
+ const uint64_t token =
168
+ #endif
169
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(&this->barrier);
170
+
171
+ return arrival_token(token);
172
+ }
173
+
174
+ _CUDA_AWBARRIER_QUALIFIER
175
+ bool awbarrier::timed_wait(arrival_token token, uint32_t hint_cycles)
176
+ {
177
+ constexpr uint64_t max_busy_wait_cycles = 1024;
178
+ constexpr uint32_t max_sleep_ns = 1 << 20;
179
+
180
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
181
+
182
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
183
+ return true;
184
+ }
185
+
186
+ uint64_t start_cycles = clock64();
187
+ uint64_t elapsed_cycles = 0;
188
+ uint32_t sleep_ns = 32;
189
+ while (elapsed_cycles < hint_cycles) {
190
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
191
+ return true;
192
+ }
193
+
194
+ if (elapsed_cycles > max_busy_wait_cycles) {
195
+ __nanosleep(sleep_ns);
196
+ if (sleep_ns < max_sleep_ns) {
197
+ sleep_ns *= 2;
198
+ }
199
+ }
200
+
201
+ elapsed_cycles = clock64() - start_cycles;
202
+ }
203
+
204
+ return false;
205
+ }
206
+
207
+ _CUDA_AWBARRIER_QUALIFIER
208
+ bool awbarrier::timed_wait_parity(bool phase, uint32_t hint_cycles)
209
+ {
210
+ constexpr uint64_t max_busy_wait_cycles = 1024;
211
+ constexpr uint32_t max_sleep_ns = 1 << 20;
212
+
213
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
214
+
215
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
216
+ return true;
217
+ }
218
+
219
+ uint64_t start_cycles = clock64();
220
+ uint64_t elapsed_cycles = 0;
221
+ uint32_t sleep_ns = 32;
222
+ while (elapsed_cycles < hint_cycles) {
223
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
224
+ return true;
225
+ }
226
+
227
+ if (elapsed_cycles > max_busy_wait_cycles) {
228
+ __nanosleep(sleep_ns);
229
+ if (sleep_ns < max_sleep_ns) {
230
+ sleep_ns *= 2;
231
+ }
232
+ }
233
+
234
+ elapsed_cycles = clock64() - start_cycles;
235
+ }
236
+
237
+ return false;
238
+ }
239
+
240
+ _CUDA_AWBARRIER_QUALIFIER
241
+ bool awbarrier::try_wait(arrival_token token, uint32_t maxSleepNanosec)
242
+ {
243
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
244
+
245
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(&this->barrier, token.token, maxSleepNanosec);
246
+ }
247
+
248
+ _CUDA_AWBARRIER_QUALIFIER
249
+ bool awbarrier::try_wait_parity(bool phase, uint32_t maxSleepNanosec)
250
+ {
251
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
252
+
253
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(&this->barrier, phase, maxSleepNanosec);
254
+ }
255
+
256
+ _CUDA_AWBARRIER_QUALIFIER
257
+ void awbarrier::wait(arrival_token token)
258
+ {
259
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
260
+
261
+ while (!timed_wait(token, ~0u));
262
+ }
263
+
264
+ _CUDA_AWBARRIER_QUALIFIER
265
+ void awbarrier::arrive_and_wait()
266
+ {
267
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
268
+
269
+ this->wait(this->arrive());
270
+ }
271
+
272
+ _CUDA_AWBARRIER_QUALIFIER __host__
273
+ constexpr uint32_t awbarrier::max()
274
+ {
275
+ return _CUDA_AWBARRIER_MAX_COUNT;
276
+ }
277
+
278
+ _CUDA_AWBARRIER_END_NAMESPACE
279
+
280
+ #endif /* !_CUDA_AWBARRIER_H_ */
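/*
 * A minimal device-side sketch of the awbarrier class defined above: one
 * block-wide barrier in shared memory, initialized by thread 0 and then
 * used by every thread of the block. Compile as CUDA C++ (-std=c++11 or
 * later) for compute capability 7.0 or higher.
 */
#include "cuda_awbarrier.h"

__global__ void example_block_barrier()
{
    using nvcuda::experimental::awbarrier;
    __shared__ awbarrier bar;

    if (threadIdx.x == 0) {
        nvcuda::experimental::init(&bar, blockDim.x);  /* expected_count = block size */
    }
    __syncthreads();                                   /* make the initialization visible */

    /* ... produce data for the rest of the block here ... */
    bar.arrive_and_wait();                             /* arrive() / wait(token) also possible */
    /* ... consume data produced before the barrier ... */

    if (threadIdx.x == 0) {
        nvcuda::experimental::inval(&bar);
    }
}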
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h ADDED
@@ -0,0 +1,365 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_HELPERS_H_
51
+ #define _CUDA_AWBARRIER_HELPERS_H_
52
+
53
+ #define _CUDA_AWBARRIER_NAMESPACE nvcuda::experimental
54
+ #define _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace nvcuda { namespace experimental {
55
+ #define _CUDA_AWBARRIER_END_NAMESPACE } }
56
+
57
+ #define _CUDA_AWBARRIER_INTERNAL_NAMESPACE _CUDA_AWBARRIER_NAMESPACE::__awbarrier_internal
58
+ #define _CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace __awbarrier_internal {
59
+ #define _CUDA_AWBARRIER_END_INTERNAL_NAMESPACE } _CUDA_AWBARRIER_END_NAMESPACE
60
+
61
+ # if !defined(_CUDA_AWBARRIER_QUALIFIER)
62
+ # define _CUDA_AWBARRIER_QUALIFIER inline __device__
63
+ # endif
64
+ # if !defined(_CUDA_AWBARRIER_STATIC_QUALIFIER)
65
+ # define _CUDA_AWBARRIER_STATIC_QUALIFIER static inline __device__
66
+ #endif
67
+
68
+ #if defined(__CUDA_ARCH__)
69
+ #if (__CUDA_ARCH__ >= 900)
70
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_90
71
+ #elif (__CUDA_ARCH__ >= 800)
72
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_80
73
+ #elif (__CUDA_ARCH__ >= 700)
74
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70
75
+ #endif
76
+ #else
77
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70
78
+ #endif
79
+
80
+ #define _CUDA_AWBARRIER_MAX_COUNT ((1 << 14) - 1)
81
+
82
+ #if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900)))
83
+ # define _CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER
84
+ #endif
85
+
86
+ #if !defined(_CUDA_AWBARRIER_DEBUG)
87
+ # if defined(__CUDACC_DEBUG__)
88
+ # define _CUDA_AWBARRIER_DEBUG 1
89
+ # else
90
+ # define _CUDA_AWBARRIER_DEBUG 0
91
+ # endif
92
+ #endif
93
+
94
+ #if defined(_CUDA_AWBARRIER_DEBUG) && (_CUDA_AWBARRIER_DEBUG == 1) && !defined(NDEBUG)
95
+ # if !defined(__CUDACC_RTC__)
96
+ # include <cassert>
97
+ # endif
98
+ # define _CUDA_AWBARRIER_ASSERT(x) assert((x));
99
+ # define _CUDA_AWBARRIER_ABORT() assert(0);
100
+ #else
101
+ # define _CUDA_AWBARRIER_ASSERT(x)
102
+ # define _CUDA_AWBARRIER_ABORT() __trap();
103
+ #endif
104
+
105
+ #if defined(__CUDACC_RTC__)
106
+ typedef unsigned short uint16_t;
107
+ typedef unsigned int uint32_t;
108
+ typedef unsigned long long uint64_t;
109
+ typedef uint64_t uintptr_t;
110
+ #else
111
+ # include <stdint.h>
112
+ #endif
113
+
114
+ // implicitly provided by NVRTC
115
+ #ifndef __CUDACC_RTC__
116
+ #include <nv/target>
117
+ #endif /* !defined(__CUDACC_RTC__) */
118
+
119
+ typedef uint64_t __mbarrier_t;
120
+ typedef uint64_t __mbarrier_token_t;
121
+
122
+ _CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE
123
+
124
+ extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *);
125
+
126
+ union AWBarrier {
127
+ struct {
128
+ uint32_t expected;
129
+ uint32_t pending;
130
+ } split;
131
+ uint64_t raw;
132
+ };
133
+
134
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
135
+ void awbarrier_init(uint64_t* barrier, uint32_t expected_count) {
136
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
137
+ _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count < (1 << 29));
138
+
139
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
140
+ asm volatile ("mbarrier.init.shared.b64 [%0], %1;"
141
+ :
142
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(expected_count)
143
+ : "memory");
144
+ return;
145
+ )
146
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
147
+ AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
148
+
149
+ awbarrier->split.expected = 0x40000000 - expected_count;
150
+ awbarrier->split.pending = 0x80000000 - expected_count;
151
+ return;
152
+ )
153
+ }
154
+
155
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
156
+ void awbarrier_inval(uint64_t* barrier) {
157
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
158
+
159
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
160
+ asm volatile ("mbarrier.inval.shared.b64 [%0];"
161
+ :
162
+ : "r"(__nvvm_get_smem_pointer(barrier))
163
+ : "memory");
164
+ return;
165
+ )
166
+ return;
167
+ }
168
+
169
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
170
+ uint32_t awbarrier_token_pending_count(uint64_t token) {
171
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
172
+ uint32_t __pending_count;
173
+
174
+ asm ("mbarrier.pending_count.b64 %0, %1;"
175
+ : "=r"(__pending_count)
176
+ : "l"(token));
177
+ return __pending_count;
178
+ )
179
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
180
+ const uint32_t pending = token >> 32;
181
+ return 0x80000000 - (pending & 0x7fffffff);
182
+ )
183
+ }
184
+
185
+ template<bool _Drop>
186
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
187
+ uint64_t awbarrier_arrive_drop(uint64_t* barrier) {
188
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
189
+
190
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
191
+ uint64_t token;
192
+
193
+ if (_Drop) {
194
+ asm volatile ("mbarrier.arrive_drop.shared.b64 %0, [%1];"
195
+ : "=l"(token)
196
+ : "r"(__nvvm_get_smem_pointer(barrier))
197
+ : "memory");
198
+ } else {
199
+ asm volatile ("mbarrier.arrive.shared.b64 %0, [%1];"
200
+ : "=l"(token)
201
+ : "r"(__nvvm_get_smem_pointer(barrier))
202
+ : "memory");
203
+ }
204
+
205
+ return token;
206
+ )
207
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
208
+ AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
209
+
210
+ while ((*reinterpret_cast<volatile uint32_t*>(&awbarrier->split.pending) & 0x7fffffff) == 0);
211
+
212
+ if (_Drop) {
213
+ (void)atomicAdd_block(&awbarrier->split.expected, 1);
214
+ }
215
+
216
+ __threadfence_block();
217
+
218
+ const uint32_t old_pending = atomicAdd_block(&awbarrier->split.pending, 1);
219
+ const uint32_t new_pending = old_pending + 1;
220
+ const bool reset = (old_pending ^ new_pending) & 0x80000000;
221
+
222
+ if (reset) {
223
+ __threadfence_block();
224
+
225
+ uint32_t new_expected = *reinterpret_cast<volatile uint32_t*>(&awbarrier->split.expected);
226
+ new_expected &= ~0x40000000;
227
+ if (new_expected & 0x20000000) {
228
+ new_expected |= 0x40000000;
229
+ }
230
+ atomicAdd_block(&awbarrier->split.pending, new_expected);
231
+ }
232
+
233
+ return static_cast<uint64_t>(old_pending) << 32;
234
+ )
235
+ }
236
+
237
+ template<bool _Drop>
238
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
239
+ uint64_t awbarrier_arrive_drop_no_complete(uint64_t* barrier, uint32_t count) {
240
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
241
+ _CUDA_AWBARRIER_ASSERT(count > 0 && count < (1 << 29));
242
+
243
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
244
+ uint64_t token;
245
+
246
+ if (_Drop) {
247
+ asm volatile ("mbarrier.arrive_drop.noComplete.shared.b64 %0, [%1], %2;"
248
+ : "=l"(token)
249
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count)
250
+ : "memory");
251
+ } else {
252
+ asm volatile ("mbarrier.arrive.noComplete.shared.b64 %0, [%1], %2;"
253
+ : "=l"(token)
254
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count)
255
+ : "memory");
256
+ }
257
+
258
+ return token;
259
+ )
260
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
261
+ AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
262
+
263
+ while ((*reinterpret_cast<volatile uint32_t*>(&awbarrier->split.pending) & 0x7fffffff) == 0);
264
+
265
+ if (_Drop) {
266
+ (void)atomicAdd_block(&awbarrier->split.expected, count);
267
+ }
268
+
269
+ return static_cast<uint64_t>(atomicAdd_block(&awbarrier->split.pending, count)) << 32;
270
+ )
271
+ }
272
+
273
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
274
+ bool awbarrier_test_wait(uint64_t* barrier, uint64_t token) {
275
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
276
+
277
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
278
+ uint32_t __wait_complete;
279
+
280
+ asm volatile ("{"
281
+ " .reg .pred %%p;"
282
+ " mbarrier.test_wait.shared.b64 %%p, [%1], %2;"
283
+ " selp.b32 %0, 1, 0, %%p;"
284
+ "}"
285
+ : "=r"(__wait_complete)
286
+ : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token)
287
+ : "memory");
288
+ return bool(__wait_complete);
289
+ )
290
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
291
+ volatile AWBarrier* awbarrier = reinterpret_cast<volatile AWBarrier*>(barrier);
292
+
293
+ return ((token >> 32) ^ awbarrier->split.pending) & 0x80000000;
294
+ )
295
+ }
296
+
297
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
298
+ bool awbarrier_test_wait_parity(uint64_t* barrier, bool phase_parity) {
299
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
300
+
301
+ NV_IF_TARGET(NV_PROVIDES_SM_90,
302
+ uint32_t __wait_complete = 0;
303
+
304
+ asm volatile ("{"
305
+ ".reg .pred %%p;"
306
+ "mbarrier.test_wait.parity.shared.b64 %%p, [%1], %2;"
307
+ "selp.b32 %0, 1, 0, %%p;"
308
+ "}"
309
+ : "=r"(__wait_complete)
310
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast<uint32_t>(phase_parity))
311
+ : "memory");
312
+
313
+ return __wait_complete;
314
+ )
315
+ _CUDA_AWBARRIER_ABORT()
316
+ return false;
317
+ }
318
+
319
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
320
+ bool awbarrier_try_wait(uint64_t* barrier, uint64_t token, uint32_t max_sleep_nanosec) {
321
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
322
+
323
+ NV_IF_TARGET(NV_PROVIDES_SM_90,
324
+ uint32_t __wait_complete = 0;
325
+
326
+ asm volatile ("{\n\t"
327
+ ".reg .pred p;\n\t"
328
+ "mbarrier.try_wait.shared.b64 p, [%1], %2, %3;\n\t"
329
+ "selp.b32 %0, 1, 0, p;\n\t"
330
+ "}"
331
+ : "=r"(__wait_complete)
332
+ : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token), "r"(max_sleep_nanosec)
333
+ : "memory");
334
+
335
+ return __wait_complete;
336
+ )
337
+ _CUDA_AWBARRIER_ABORT()
338
+ return false;
339
+ }
340
+
341
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
342
+ bool awbarrier_try_wait_parity(uint64_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
343
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
344
+
345
+ NV_IF_TARGET(NV_PROVIDES_SM_90,
346
+ uint32_t __wait_complete = 0;
347
+
348
+ asm volatile ("{\n\t"
349
+ ".reg .pred p;\n\t"
350
+ "mbarrier.try_wait.parity.shared.b64 p, [%1], %2, %3;\n\t"
351
+ "selp.b32 %0, 1, 0, p;\n\t"
352
+ "}"
353
+ : "=r"(__wait_complete)
354
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast<uint32_t>(phase_parity)), "r"(max_sleep_nanosec)
355
+ : "memory");
356
+
357
+ return __wait_complete;
358
+ )
359
+ _CUDA_AWBARRIER_ABORT()
360
+ return false;
361
+ }
362
+
363
+ _CUDA_AWBARRIER_END_INTERNAL_NAMESPACE
364
+
365
+ #endif /* !_CUDA_AWBARRIER_HELPERS_H_ */
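As an aside, the pre-sm_80 fallback above emulates mbarrier in software: awbarrier_init() packs the counts into the split expected/pending words, awbarrier_arrive_drop() returns the old pending word in the high 32 bits of the token, and awbarrier_test_wait() reports completion once the phase bit (0x80000000) of the live pending word differs from the copy captured in the token. A small stand-alone illustration of that check (hypothetical host-side code, not part of the header):

#include <cstdint>
#include <cstdio>

// Mirrors the sm_70 branch of awbarrier_test_wait() above: completion means the
// phase bit of the live 'pending' word differs from the token's captured copy.
static bool fallback_test_wait(uint64_t token, uint32_t current_pending)
{
    return (((token >> 32) ^ current_pending) & 0x80000000u) != 0;
}

int main()
{
    uint32_t pending = 0x80000000u - 2;            // awbarrier_init() with expected_count == 2

    // First arrival: the token carries the *old* pending word, then pending is bumped.
    uint64_t token = static_cast<uint64_t>(pending) << 32;
    pending += 1;
    printf("%d\n", fallback_test_wait(token, pending));   // 0: one arrival still outstanding

    pending += 1;                                  // second arrival flips the phase bit
    printf("%d\n", fallback_test_wait(token, pending));   // 1: wait completes
    return 0;
}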
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h ADDED
@@ -0,0 +1,109 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_PRIMITIVES_H_
51
+ #define _CUDA_AWBARRIER_PRIMITIVES_H_
52
+
53
+ #include "cuda_awbarrier_helpers.h"
54
+
55
+ #if !defined(_CUDA_AWBARRIER_SM_TARGET)
56
+ # error This file requires compute capability 7.0 or greater.
57
+ #endif
58
+
59
+ _CUDA_AWBARRIER_STATIC_QUALIFIER __host__
60
+ uint32_t __mbarrier_maximum_count() {
61
+ return _CUDA_AWBARRIER_MAX_COUNT;
62
+ }
63
+
64
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
65
+ void __mbarrier_init(__mbarrier_t* barrier, uint32_t expected_count) {
66
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(barrier, expected_count);
67
+ }
68
+
69
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
70
+ void __mbarrier_inval(__mbarrier_t* barrier) {
71
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(barrier);
72
+ }
73
+
74
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
75
+ __mbarrier_token_t __mbarrier_arrive(__mbarrier_t* barrier) {
76
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(barrier);
77
+ }
78
+
79
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
80
+ __mbarrier_token_t __mbarrier_arrive_and_drop(__mbarrier_t* barrier) {
81
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(barrier);
82
+ }
83
+
84
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
85
+ bool __mbarrier_test_wait(__mbarrier_t* barrier, __mbarrier_token_t token) {
86
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(barrier, token);
87
+ }
88
+
89
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
90
+ uint32_t __mbarrier_token_pending_count(__mbarrier_token_t token) {
91
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(token);
92
+ }
93
+
94
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
95
+ bool __mbarrier_test_wait_parity(__mbarrier_t* barrier, bool phase_parity) {
96
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(barrier, phase_parity);
97
+ }
98
+
99
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
100
+ bool __mbarrier_try_wait(__mbarrier_t* barrier, __mbarrier_token_t token, uint32_t max_sleep_nanosec) {
101
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(barrier, token, max_sleep_nanosec);
102
+ }
103
+
104
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
105
+ bool __mbarrier_try_wait_parity(__mbarrier_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
106
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(barrier, phase_parity, max_sleep_nanosec);
107
+ }
108
+
109
+ #endif /* !_CUDA_AWBARRIER_PRIMITIVES_H_ */
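A minimal sketch (not part of the header) of driving the __mbarrier_* primitives declared above directly from a kernel; every name used here appears in the hunk above, and sm_70 or newer is required.

#include <cuda_awbarrier_primitives.h>

__global__ void mbarrier_example(int* out)
{
    __shared__ __mbarrier_t bar;

    if (threadIdx.x == 0) {
        __mbarrier_init(&bar, blockDim.x);         // one expected arrival per thread
    }
    __syncthreads();

    out[threadIdx.x] = threadIdx.x;                // work that precedes the barrier
    __mbarrier_token_t token = __mbarrier_arrive(&bar);

    while (!__mbarrier_test_wait(&bar, token)) {
        /* poll; a real kernel could overlap independent work here */
    }

    __syncthreads();
    if (threadIdx.x == 0) {
        __mbarrier_inval(&bar);                    // invalidate before the memory is reused
    }
}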
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h ADDED
@@ -0,0 +1,735 @@
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_DEVICE_RUNTIME_API_H__)
51
+ #define __CUDA_DEVICE_RUNTIME_API_H__
52
+
53
+ #if defined(__CUDACC__) && !defined(__CUDACC_RTC__)
54
+ #include <stdlib.h>
55
+ #endif
56
+
57
+ /*******************************************************************************
58
+ * *
59
+ * *
60
+ * *
61
+ *******************************************************************************/
62
+
63
+ #if !defined(CUDA_FORCE_CDP1_IF_SUPPORTED) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_NVHPC_CUDA) && !(defined(_WIN32) && !defined(_WIN64))
64
+ #define __CUDA_INTERNAL_USE_CDP2
65
+ #endif
66
+
67
+ #if !defined(__CUDACC_RTC__)
68
+
69
+ #if !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__)
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif
74
+
75
+ struct cudaFuncAttributes;
76
+
77
+
78
+ #ifndef __CUDA_INTERNAL_USE_CDP2
79
+ inline __device__ cudaError_t CUDARTAPI cudaMalloc(void **p, size_t s)
80
+ {
81
+ return cudaErrorUnknown;
82
+ }
83
+
84
+ inline __device__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *p, const void *c)
85
+ {
86
+ return cudaErrorUnknown;
87
+ }
88
+
89
+ inline __device__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device)
90
+ {
91
+ return cudaErrorUnknown;
92
+ }
93
+
94
+ inline __device__ cudaError_t CUDARTAPI cudaGetDevice(int *device)
95
+ {
96
+ return cudaErrorUnknown;
97
+ }
98
+
99
+ inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize)
100
+ {
101
+ return cudaErrorUnknown;
102
+ }
103
+
104
+ inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags)
105
+ {
106
+ return cudaErrorUnknown;
107
+ }
108
+ #else // __CUDA_INTERNAL_USE_CDP2
109
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **p, size_t s)
110
+ {
111
+ return cudaErrorUnknown;
112
+ }
113
+
114
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *p, const void *c)
115
+ {
116
+ return cudaErrorUnknown;
117
+ }
118
+
119
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device)
120
+ {
121
+ return cudaErrorUnknown;
122
+ }
123
+
124
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device)
125
+ {
126
+ return cudaErrorUnknown;
127
+ }
128
+
129
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize)
130
+ {
131
+ return cudaErrorUnknown;
132
+ }
133
+
134
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags)
135
+ {
136
+ return cudaErrorUnknown;
137
+ }
138
+ #endif // __CUDA_INTERNAL_USE_CDP2
139
+
140
+
141
+ #if defined(__cplusplus)
142
+ }
143
+ #endif
144
+
145
+ #endif /* !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__) */
146
+
147
+ #endif /* !defined(__CUDACC_RTC__) */
148
+
149
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
150
+ # define __DEPRECATED__(msg)
151
+ #elif defined(_WIN32)
152
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
153
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
154
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
155
+ #else
156
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
157
+ #endif
158
+
159
+ #if defined(__CUDA_ARCH__) && !defined(__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING)
160
+ # define __CDPRT_DEPRECATED(func_name) __DEPRECATED__("Use of "#func_name" from device code is deprecated. Moreover, such use will cause this module to fail to load on sm_90+ devices. If calls to "#func_name" from device code cannot be removed for older devices at this time, you may guard them with __CUDA_ARCH__ macros to remove them only for sm_90+ devices, making sure to generate code for compute_90 for the macros to take effect. Note that this mitigation will no longer work when support for "#func_name" from device code is eventually dropped for all devices. Disable this warning with -D__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING.")
161
+ #else
162
+ # define __CDPRT_DEPRECATED(func_name)
163
+ #endif
164
+
165
+ #if defined(__cplusplus) && defined(__CUDACC__) /* Visible to nvcc front-end only */
166
+ #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350) // Visible to SM>=3.5 and "__host__ __device__" only
167
+
168
+ #include "driver_types.h"
169
+ #include "crt/host_defines.h"
170
+
171
+ #define cudaStreamGraphTailLaunch (cudaStream_t)0x0100000000000000
172
+ #define cudaStreamGraphFireAndForget (cudaStream_t)0x0200000000000000
173
+
174
+ #ifdef __CUDA_INTERNAL_USE_CDP2
175
+ #define cudaStreamTailLaunch ((cudaStream_t)0x3) /**< Per-grid stream with a fire-and-forget synchronization behavior. Only applicable when used with CUDA Dynamic Parallelism. */
176
+ #define cudaStreamFireAndForget ((cudaStream_t)0x4) /**< Per-grid stream with a tail launch semantics. Only applicable when used with CUDA Dynamic Parallelism. */
177
+ #endif
178
+
179
+ extern "C"
180
+ {
181
+
182
+ // Symbols beginning with __cudaCDP* should not be used outside
183
+ // this header file. Instead, compile with -DCUDA_FORCE_CDP1_IF_SUPPORTED if
184
+ // CDP1 support is required.
185
+
186
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaDeviceSynchronizeDeprecationAvoidance(void);
187
+
188
+ #ifndef __CUDA_INTERNAL_USE_CDP2
189
+ //// CDP1 endpoints
190
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device);
191
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit);
192
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig);
193
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig);
194
+ #if (__CUDA_ARCH__ < 900) && (defined(CUDA_FORCE_CDP1_IF_SUPPORTED) || (defined(_WIN32) && !defined(_WIN64)))
195
+ // cudaDeviceSynchronize is removed on sm_90+
196
+ extern __device__ __cudart_builtin__ __CDPRT_DEPRECATED(cudaDeviceSynchronize) cudaError_t CUDARTAPI cudaDeviceSynchronize(void);
197
+ #endif
198
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void);
199
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void);
200
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error);
201
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error);
202
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count);
203
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device);
204
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags);
205
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream);
206
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
207
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
208
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags);
209
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream);
210
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream);
211
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
212
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
213
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event);
214
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func);
215
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr);
216
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size);
217
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
218
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
219
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
220
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
221
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
222
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
223
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream);
224
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream);
225
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
226
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
227
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
228
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
229
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion);
230
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize);
231
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags);
232
+ #endif // __CUDA_INTERNAL_USE_CDP2
233
+
234
+ //// CDP2 endpoints
235
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device);
236
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetLimit(size_t *pValue, enum cudaLimit limit);
237
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig);
238
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig);
239
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetLastError(void);
240
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2PeekAtLastError(void);
241
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorString(cudaError_t error);
242
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorName(cudaError_t error);
243
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDeviceCount(int *count);
244
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device);
245
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags);
246
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamDestroy(cudaStream_t stream);
247
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
248
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
249
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventCreateWithFlags(cudaEvent_t *event, unsigned int flags);
250
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord(cudaEvent_t event, cudaStream_t stream);
251
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord_ptsz(cudaEvent_t event, cudaStream_t stream);
252
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
253
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
254
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventDestroy(cudaEvent_t event);
255
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *attr, const void *func);
256
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Free(void *devPtr);
257
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **devPtr, size_t size);
258
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
259
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
260
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
261
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
262
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
263
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
264
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream);
265
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream);
266
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
267
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
268
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
269
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
270
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2RuntimeGetVersion(int *runtimeVersion);
271
+ extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBuffer(size_t alignment, size_t size);
272
+ extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize);
273
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
274
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream);
275
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
276
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2(void *parameterBuffer, cudaStream_t stream);
277
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize);
278
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags);
279
+
280
+
281
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream);
282
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
283
+ static inline __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch_ptsz(cudaGraphExec_t graphExec, cudaStream_t stream)
284
+ {
285
+ if (stream == 0) {
286
+ stream = cudaStreamPerThread;
287
+ }
288
+ return cudaGraphLaunch(graphExec, stream);
289
+ }
290
+ #endif
291
+
292
+ /**
293
+ * \ingroup CUDART_GRAPH
294
+ * \brief Get the currently running device graph id.
295
+ *
296
+ * Get the currently running device graph id.
297
+ * \return Returns the current device graph id, 0 if the call is outside of a device graph.
298
+ * \sa cudaLaunchDevice
299
+ */
300
+ static inline __device__ __cudart_builtin__ cudaGraphExec_t CUDARTAPI cudaGetCurrentGraphExec(void)
301
+ {
302
+ unsigned long long current_graph_exec;
303
+ asm ("mov.u64 %0, %%current_graph_exec;" : "=l"(current_graph_exec));
304
+ return (cudaGraphExec_t)current_graph_exec;
305
+ }
306
+
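For illustration, a hypothetical self-relaunching kernel (not part of this header) built only from the pieces defined above: cudaGetCurrentGraphExec(), device-side cudaGraphLaunch(), and the cudaStreamGraphTailLaunch pseudo-stream. The relaunch-limit bookkeeping through 'counter' is an assumption made for the sketch.

// Hedged sketch: when this kernel runs inside a device graph, re-launch that
// graph as a tail launch until '*counter' reaches 'limit'. Outside a device
// graph, cudaGetCurrentGraphExec() returns 0 and nothing is relaunched.
__global__ void relaunch_while_work_remains(int* counter, int limit)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        cudaGraphExec_t self = cudaGetCurrentGraphExec();
        if (self != (cudaGraphExec_t)0 && atomicAdd(counter, 1) < limit) {
            cudaGraphLaunch(self, cudaStreamGraphTailLaunch);
        }
    }
}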
307
+ /**
308
+ * \ingroup CUDART_EXECUTION
309
+ * \brief Programmatic dependency trigger
310
+ *
311
+ * This device function ensures the programmatic launch completion edges /
312
+ * events are fulfilled. See
313
+ * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization
314
+ * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more
315
+ * information. The event / edge kick off only happens when every CTAs
316
+ * in the grid has either exited or called this function at least once,
317
+ * otherwise the kick off happens automatically after all warps finishes
318
+ * execution but before the grid completes. The kick off only enables
319
+ * scheduling of the secondary kernel. It provides no memory visibility
320
+ * guarantee itself. The user could enforce memory visibility by inserting a
321
+ * memory fence of the correct scope.
322
+ */
323
+ static inline __device__ __cudart_builtin__ void CUDARTAPI cudaTriggerProgrammaticLaunchCompletion(void)
324
+ {
325
+ asm volatile("griddepcontrol.launch_dependents;":::);
326
+ }
327
+
328
+ /**
329
+ * \ingroup CUDART_EXECUTION
330
+ * \brief Programmatic grid dependency synchronization
331
+ *
332
+ * This device function will block the thread until all direct grid
333
+ * dependencies have completed. This API is intended to use in conjuncture with
334
+ * programmatic / launch event / dependency. See
335
+ * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization
336
+ * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more
337
+ * information.
338
+ */
339
+ static inline __device__ __cudart_builtin__ void CUDARTAPI cudaGridDependencySynchronize(void)
340
+ {
341
+ asm volatile("griddepcontrol.wait;":::"memory");
342
+ }
343
+
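For illustration, a hedged producer/consumer sketch (not part of this header) of the pairing the two comments above describe. The host-side launch of 'consumer' must opt in via the programmatic launch attributes referenced above (e.g. cudaLaunchAttributeProgrammaticStreamSerialization); that host setup is omitted here.

__global__ void producer(float* buf, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = 2.0f * i;              // produce data

    // Allow the dependent grid to start being scheduled. By itself this gives
    // no memory-visibility guarantee (see the comment above).
    cudaTriggerProgrammaticLaunchCompletion();
}

__global__ void consumer(const float* buf, float* out, int n)
{
    // Block until all direct grid dependencies (the producer) have completed,
    // so their writes to buf are visible here.
    cudaGridDependencySynchronize();

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = buf[i] + 1.0f;
}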
344
+
345
+ //// CG API
346
+ extern __device__ __cudart_builtin__ unsigned long long CUDARTAPI cudaCGGetIntrinsicHandle(enum cudaCGScope scope);
347
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronize(unsigned long long handle, unsigned int flags);
348
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronizeGrid(unsigned long long handle, unsigned int flags);
349
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetSize(unsigned int *numThreads, unsigned int *numGrids, unsigned long long handle);
350
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetRank(unsigned int *threadRank, unsigned int *gridRank, unsigned long long handle);
351
+
352
+
353
+ //// CDP API
354
+
355
+ #ifdef __CUDA_ARCH__
356
+
357
+ #ifdef __CUDA_INTERNAL_USE_CDP2
358
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device)
359
+ {
360
+ return __cudaCDP2DeviceGetAttribute(value, attr, device);
361
+ }
362
+
363
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit)
364
+ {
365
+ return __cudaCDP2DeviceGetLimit(pValue, limit);
366
+ }
367
+
368
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig)
369
+ {
370
+ return __cudaCDP2DeviceGetCacheConfig(pCacheConfig);
371
+ }
372
+
373
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig)
374
+ {
375
+ return __cudaCDP2DeviceGetSharedMemConfig(pConfig);
376
+ }
377
+
378
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void)
379
+ {
380
+ return __cudaCDP2GetLastError();
381
+ }
382
+
383
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void)
384
+ {
385
+ return __cudaCDP2PeekAtLastError();
386
+ }
387
+
388
+ static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error)
389
+ {
390
+ return __cudaCDP2GetErrorString(error);
391
+ }
392
+
393
+ static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error)
394
+ {
395
+ return __cudaCDP2GetErrorName(error);
396
+ }
397
+
398
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count)
399
+ {
400
+ return __cudaCDP2GetDeviceCount(count);
401
+ }
402
+
403
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device)
404
+ {
405
+ return __cudaCDP2GetDevice(device);
406
+ }
407
+
408
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags)
409
+ {
410
+ return __cudaCDP2StreamCreateWithFlags(pStream, flags);
411
+ }
412
+
413
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream)
414
+ {
415
+ return __cudaCDP2StreamDestroy(stream);
416
+ }
417
+
418
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags)
419
+ {
420
+ return __cudaCDP2StreamWaitEvent(stream, event, flags);
421
+ }
422
+
423
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags)
424
+ {
425
+ return __cudaCDP2StreamWaitEvent_ptsz(stream, event, flags);
426
+ }
427
+
428
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags)
429
+ {
430
+ return __cudaCDP2EventCreateWithFlags(event, flags);
431
+ }
432
+
433
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream)
434
+ {
435
+ return __cudaCDP2EventRecord(event, stream);
436
+ }
437
+
438
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream)
439
+ {
440
+ return __cudaCDP2EventRecord_ptsz(event, stream);
441
+ }
442
+
443
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags)
444
+ {
445
+ return __cudaCDP2EventRecordWithFlags(event, stream, flags);
446
+ }
447
+
448
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags)
449
+ {
450
+ return __cudaCDP2EventRecordWithFlags_ptsz(event, stream, flags);
451
+ }
452
+
453
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event)
454
+ {
455
+ return __cudaCDP2EventDestroy(event);
456
+ }
457
+
458
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func)
459
+ {
460
+ return __cudaCDP2FuncGetAttributes(attr, func);
461
+ }
462
+
463
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr)
464
+ {
465
+ return __cudaCDP2Free(devPtr);
466
+ }
467
+
468
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size)
469
+ {
470
+ return __cudaCDP2Malloc(devPtr, size);
471
+ }
472
+
473
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream)
474
+ {
475
+ return __cudaCDP2MemcpyAsync(dst, src, count, kind, stream);
476
+ }
477
+
478
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream)
479
+ {
480
+ return __cudaCDP2MemcpyAsync_ptsz(dst, src, count, kind, stream);
481
+ }
482
+
483
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream)
484
+ {
485
+ return __cudaCDP2Memcpy2DAsync(dst, dpitch, src, spitch, width, height, kind, stream);
486
+ }
487
+
488
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream)
489
+ {
490
+ return __cudaCDP2Memcpy2DAsync_ptsz(dst, dpitch, src, spitch, width, height, kind, stream);
491
+ }
492
+
493
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream)
494
+ {
495
+ return __cudaCDP2Memcpy3DAsync(p, stream);
496
+ }
497
+
498
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream)
499
+ {
500
+ return __cudaCDP2Memcpy3DAsync_ptsz(p, stream);
501
+ }
502
+
503
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream)
504
+ {
505
+ return __cudaCDP2MemsetAsync(devPtr, value, count, stream);
506
+ }
507
+
508
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream)
509
+ {
510
+ return __cudaCDP2MemsetAsync_ptsz(devPtr, value, count, stream);
511
+ }
512
+
513
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream)
514
+ {
515
+ return __cudaCDP2Memset2DAsync(devPtr, pitch, value, width, height, stream);
516
+ }
517
+
518
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream)
519
+ {
520
+ return __cudaCDP2Memset2DAsync_ptsz(devPtr, pitch, value, width, height, stream);
521
+ }
522
+
523
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream)
524
+ {
525
+ return __cudaCDP2Memset3DAsync(pitchedDevPtr, value, extent, stream);
526
+ }
527
+
528
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream)
529
+ {
530
+ return __cudaCDP2Memset3DAsync_ptsz(pitchedDevPtr, value, extent, stream);
531
+ }
532
+
533
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion)
534
+ {
535
+ return __cudaCDP2RuntimeGetVersion(runtimeVersion);
536
+ }
537
+
538
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize)
539
+ {
540
+ return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSmemSize);
541
+ }
542
+
543
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags)
544
+ {
545
+ return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSmemSize, flags);
546
+ }
547
+ #endif // __CUDA_INTERNAL_USE_CDP2
548
+
549
+ #endif // __CUDA_ARCH__
550
+
551
+
552
+ /**
553
+ * \ingroup CUDART_EXECUTION
554
+ * \brief Obtains a parameter buffer
555
+ *
556
+ * Obtains a parameter buffer which can be filled with parameters for a kernel launch.
557
+ * Parameters passed to ::cudaLaunchDevice must be allocated via this function.
558
+ *
559
+ * This is a low level API and can only be accessed from Parallel Thread Execution (PTX).
560
+ * CUDA user code should use <<< >>> to launch kernels.
561
+ *
562
+ * \param alignment - Specifies alignment requirement of the parameter buffer
563
+ * \param size - Specifies size requirement in bytes
564
+ *
565
+ * \return
566
+ * Returns pointer to the allocated parameterBuffer
567
+ * \notefnerr
568
+ *
569
+ * \sa cudaLaunchDevice
570
+ */
571
+ #ifdef __CUDA_INTERNAL_USE_CDP2
572
+ static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size)
573
+ {
574
+ return __cudaCDP2GetParameterBuffer(alignment, size);
575
+ }
576
+ #else
577
+ extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size);
578
+ #endif
579
+
580
+
581
+ /**
582
+ * \ingroup CUDART_EXECUTION
583
+ * \brief Launches a specified kernel
584
+ *
585
+ * Launches a specified kernel with the specified parameter buffer. A parameter buffer can be obtained
586
+ * by calling ::cudaGetParameterBuffer().
587
+ *
588
+ * This is a low level API and can only be accessed from Parallel Thread Execution (PTX).
589
+ * CUDA user code should use <<< >>> to launch the kernels.
590
+ *
591
+ * \param func - Pointer to the kernel to be launched
592
+ * \param parameterBuffer - Holds the parameters to the launched kernel. parameterBuffer can be NULL. (Optional)
593
+ * \param gridDimension - Specifies grid dimensions
594
+ * \param blockDimension - Specifies block dimensions
595
+ * \param sharedMemSize - Specifies size of shared memory
596
+ * \param stream - Specifies the stream to be used
597
+ *
598
+ * \return
599
+ * ::cudaSuccess, ::cudaErrorInvalidDevice, ::cudaErrorLaunchMaxDepthExceeded, ::cudaErrorInvalidConfiguration,
600
+ * ::cudaErrorStartupFailure, ::cudaErrorLaunchPendingCountExceeded, ::cudaErrorLaunchOutOfResources
601
+ * \notefnerr
602
+ * \n Please refer to Execution Configuration and Parameter Buffer Layout from the CUDA Programming
603
+ * Guide for the detailed descriptions of launch configuration and parameter layout respectively.
604
+ *
605
+ * \sa cudaGetParameterBuffer
606
+ */
607
+ #ifdef __CUDA_INTERNAL_USE_CDP2
608
+ static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize)
609
+ {
610
+ return __cudaCDP2GetParameterBufferV2(func, gridDimension, blockDimension, sharedMemSize);
611
+ }
612
+ #else
613
+ extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize);
614
+ #endif
615
+
616
+
617
+ #ifdef __CUDA_INTERNAL_USE_CDP2
618
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream)
619
+ {
620
+ return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
621
+ }
622
+
623
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream)
624
+ {
625
+ return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream);
626
+ }
627
+ #else
628
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
629
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream);
630
+ #endif
631
+
632
+
633
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__)
634
+ // When compiling for the device and per thread default stream is enabled, add
635
+ // a static inline redirect to the per thread stream entry points.
636
+
637
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI
638
+ cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream)
639
+ {
640
+ #ifdef __CUDA_INTERNAL_USE_CDP2
641
+ return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
642
+ #else
643
+ return cudaLaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
644
+ #endif
645
+ }
646
+
647
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI
648
+ cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream)
649
+ {
650
+ #ifdef __CUDA_INTERNAL_USE_CDP2
651
+ return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream);
652
+ #else
653
+ return cudaLaunchDeviceV2_ptsz(parameterBuffer, stream);
654
+ #endif
655
+ }
656
+ #else // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__)
657
+ #ifdef __CUDA_INTERNAL_USE_CDP2
658
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream)
659
+ {
660
+ return __cudaCDP2LaunchDevice(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
661
+ }
662
+
663
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream)
664
+ {
665
+ return __cudaCDP2LaunchDeviceV2(parameterBuffer, stream);
666
+ }
667
+ #else // __CUDA_INTERNAL_USE_CDP2
668
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
669
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream);
670
+ #endif // __CUDA_INTERNAL_USE_CDP2
671
+ #endif // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__)
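/*
 * Illustrative sketch (not part of the header): device-side launches normally go through
 * the <<< >>> syntax in a parent kernel (CUDA Dynamic Parallelism), which the compiler
 * lowers onto the cudaGetParameterBuffer/cudaLaunchDevice entry points declared above.
 * The kernel names and sizes below are hypothetical; build with nvcc -rdc=true and link
 * against cudadevrt.
 */
__global__ void childKernel(int *data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        data[i] *= 2;                      /* toy per-element work */
    }
}

__global__ void parentKernel(int *data, int n)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        /* Device-side launch; the runtime manages the parameter buffer internally. */
        childKernel<<<(n + 255) / 256, 256>>>(data, n);
    }
}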
672
+
673
+
674
+ // These symbols should not be used outside of this header file.
675
+ #define __cudaCDP2DeviceGetAttribute
676
+ #define __cudaCDP2DeviceGetLimit
677
+ #define __cudaCDP2DeviceGetCacheConfig
678
+ #define __cudaCDP2DeviceGetSharedMemConfig
679
+ #define __cudaCDP2GetLastError
680
+ #define __cudaCDP2PeekAtLastError
681
+ #define __cudaCDP2GetErrorString
682
+ #define __cudaCDP2GetErrorName
683
+ #define __cudaCDP2GetDeviceCount
684
+ #define __cudaCDP2GetDevice
685
+ #define __cudaCDP2StreamCreateWithFlags
686
+ #define __cudaCDP2StreamDestroy
687
+ #define __cudaCDP2StreamWaitEvent
688
+ #define __cudaCDP2StreamWaitEvent_ptsz
689
+ #define __cudaCDP2EventCreateWithFlags
690
+ #define __cudaCDP2EventRecord
691
+ #define __cudaCDP2EventRecord_ptsz
692
+ #define __cudaCDP2EventRecordWithFlags
693
+ #define __cudaCDP2EventRecordWithFlags_ptsz
694
+ #define __cudaCDP2EventDestroy
695
+ #define __cudaCDP2FuncGetAttributes
696
+ #define __cudaCDP2Free
697
+ #define __cudaCDP2Malloc
698
+ #define __cudaCDP2MemcpyAsync
699
+ #define __cudaCDP2MemcpyAsync_ptsz
700
+ #define __cudaCDP2Memcpy2DAsync
701
+ #define __cudaCDP2Memcpy2DAsync_ptsz
702
+ #define __cudaCDP2Memcpy3DAsync
703
+ #define __cudaCDP2Memcpy3DAsync_ptsz
704
+ #define __cudaCDP2MemsetAsync
705
+ #define __cudaCDP2MemsetAsync_ptsz
706
+ #define __cudaCDP2Memset2DAsync
707
+ #define __cudaCDP2Memset2DAsync_ptsz
708
+ #define __cudaCDP2Memset3DAsync
709
+ #define __cudaCDP2Memset3DAsync_ptsz
710
+ #define __cudaCDP2RuntimeGetVersion
711
+ #define __cudaCDP2GetParameterBuffer
712
+ #define __cudaCDP2GetParameterBufferV2
713
+ #define __cudaCDP2LaunchDevice_ptsz
714
+ #define __cudaCDP2LaunchDeviceV2_ptsz
715
+ #define __cudaCDP2LaunchDevice
716
+ #define __cudaCDP2LaunchDeviceV2
717
+ #define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor
718
+ #define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags
719
+
720
+ }
721
+
722
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaMalloc(T **devPtr, size_t size);
723
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaFuncGetAttributes(struct cudaFuncAttributes *attr, T *entry);
724
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize);
725
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize, unsigned int flags);
726
+
727
+
728
+ #endif // !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350)
729
+ #endif /* defined(__cplusplus) && defined(__CUDACC__) */
730
+
731
+ #undef __DEPRECATED__
732
+ #undef __CDPRT_DEPRECATED
733
+ #undef __CUDA_INTERNAL_USE_CDP2
734
+
735
+ #endif /* !__CUDA_DEVICE_RUNTIME_API_H__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h ADDED
@@ -0,0 +1,642 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_EGL_INTEROP_H__)
51
+ #define __CUDA_EGL_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+ #include "cuda_runtime.h"
55
+ #include "cudart_platform.h"
56
+ #include "EGL/egl.h"
57
+ #include "EGL/eglext.h"
58
+
59
+ #if defined(__cplusplus)
60
+ extern "C" {
61
+ #endif /* __cplusplus */
62
+
63
+ /**
64
+ * \addtogroup CUDART_TYPES
65
+ * @{
66
+ */
67
+
68
+ /**
69
+ * Maximum number of planes per frame
70
+ */
71
+ #define CUDA_EGL_MAX_PLANES 3
72
+
73
+ /**
74
+ * CUDA EglFrame type - array or pointer
75
+ */
76
+ typedef enum cudaEglFrameType_enum
77
+ {
78
+ cudaEglFrameTypeArray = 0, /**< Frame type CUDA array */
79
+ cudaEglFrameTypePitch = 1, /**< Frame type CUDA pointer */
80
+ } cudaEglFrameType;
81
+
82
+ /**
83
+ * Resource location flags- sysmem or vidmem
84
+ *
85
+ * For CUDA context on iGPU, since video and system memory are equivalent -
86
+ * these flags will not have an effect on the execution.
87
+ *
88
+ * For CUDA context on dGPU, applications can use the flag ::cudaEglResourceLocationFlags
89
+ * to give a hint about the desired location.
90
+ *
91
+ * ::cudaEglResourceLocationSysmem - the frame data is made resident on the system memory
92
+ * to be accessed by CUDA.
93
+ *
94
+ * ::cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated
95
+ * video memory to be accessed by CUDA.
96
+ *
97
+ * There may be an additional latency due to new allocation and data migration,
98
+ * if the frame is produced on a different memory.
99
+ */
100
+ typedef enum cudaEglResourceLocationFlags_enum {
101
+ cudaEglResourceLocationSysmem = 0x00, /**< Resource location sysmem */
102
+ cudaEglResourceLocationVidmem = 0x01, /**< Resource location vidmem */
103
+ } cudaEglResourceLocationFlags;
104
+
105
+ /**
106
+ * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
107
+ */
108
+ typedef enum cudaEglColorFormat_enum {
109
+ cudaEglColorFormatYUV420Planar = 0, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
110
+ cudaEglColorFormatYUV420SemiPlanar = 1, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */
111
+ cudaEglColorFormatYUV422Planar = 2, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
112
+ cudaEglColorFormatYUV422SemiPlanar = 3, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */
113
+ cudaEglColorFormatARGB = 6, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */
114
+ cudaEglColorFormatRGBA = 7, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */
115
+ cudaEglColorFormatL = 8, /**< single luminance channel in one surface. */
116
+ cudaEglColorFormatR = 9, /**< single color channel in one surface. */
117
+ cudaEglColorFormatYUV444Planar = 10, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
118
+ cudaEglColorFormatYUV444SemiPlanar = 11, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */
119
+ cudaEglColorFormatYUYV422 = 12, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */
120
+ cudaEglColorFormatUYVY422 = 13, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */
121
+ cudaEglColorFormatABGR = 14, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */
122
+ cudaEglColorFormatBGRA = 15, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */
123
+ cudaEglColorFormatA = 16, /**< Alpha color format - one channel in one surface. */
124
+ cudaEglColorFormatRG = 17, /**< R/G color format - two channels in one surface with GR byte ordering */
125
+ cudaEglColorFormatAYUV = 18, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */
126
+ cudaEglColorFormatYVU444SemiPlanar = 19, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
127
+ cudaEglColorFormatYVU422SemiPlanar = 20, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
128
+ cudaEglColorFormatYVU420SemiPlanar = 21, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
129
+ cudaEglColorFormatY10V10U10_444SemiPlanar = 22, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
130
+ cudaEglColorFormatY10V10U10_420SemiPlanar = 23, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
131
+ cudaEglColorFormatY12V12U12_444SemiPlanar = 24, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
132
+ cudaEglColorFormatY12V12U12_420SemiPlanar = 25, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
133
+ cudaEglColorFormatVYUY_ER = 26, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */
134
+ cudaEglColorFormatUYVY_ER = 27, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */
135
+ cudaEglColorFormatYUYV_ER = 28, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */
136
+ cudaEglColorFormatYVYU_ER = 29, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */
137
+ cudaEglColorFormatYUVA_ER = 31, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */
138
+ cudaEglColorFormatAYUV_ER = 32, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */
139
+ cudaEglColorFormatYUV444Planar_ER = 33, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */
140
+ cudaEglColorFormatYUV422Planar_ER = 34, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
141
+ cudaEglColorFormatYUV420Planar_ER = 35, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
142
+ cudaEglColorFormatYUV444SemiPlanar_ER = 36, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */
143
+ cudaEglColorFormatYUV422SemiPlanar_ER = 37, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
144
+ cudaEglColorFormatYUV420SemiPlanar_ER = 38, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
145
+ cudaEglColorFormatYVU444Planar_ER = 39, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */
146
+ cudaEglColorFormatYVU422Planar_ER = 40, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
147
+ cudaEglColorFormatYVU420Planar_ER = 41, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
148
+ cudaEglColorFormatYVU444SemiPlanar_ER = 42, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
149
+ cudaEglColorFormatYVU422SemiPlanar_ER = 43, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
150
+ cudaEglColorFormatYVU420SemiPlanar_ER = 44, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
151
+ cudaEglColorFormatBayerRGGB = 45, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */
152
+ cudaEglColorFormatBayerBGGR = 46, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */
153
+ cudaEglColorFormatBayerGRBG = 47, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */
154
+ cudaEglColorFormatBayerGBRG = 48, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */
155
+ cudaEglColorFormatBayer10RGGB = 49, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
156
+ cudaEglColorFormatBayer10BGGR = 50, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
157
+ cudaEglColorFormatBayer10GRBG = 51, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
158
+ cudaEglColorFormatBayer10GBRG = 52, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
159
+ cudaEglColorFormatBayer12RGGB = 53, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
160
+ cudaEglColorFormatBayer12BGGR = 54, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
161
+ cudaEglColorFormatBayer12GRBG = 55, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
162
+ cudaEglColorFormatBayer12GBRG = 56, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
163
+ cudaEglColorFormatBayer14RGGB = 57, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
164
+ cudaEglColorFormatBayer14BGGR = 58, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
165
+ cudaEglColorFormatBayer14GRBG = 59, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
166
+ cudaEglColorFormatBayer14GBRG = 60, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
167
+ cudaEglColorFormatBayer20RGGB = 61, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
168
+ cudaEglColorFormatBayer20BGGR = 62, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
169
+ cudaEglColorFormatBayer20GRBG = 63, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
170
+ cudaEglColorFormatBayer20GBRG = 64, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
171
+ cudaEglColorFormatYVU444Planar = 65, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
172
+ cudaEglColorFormatYVU422Planar = 66, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
173
+ cudaEglColorFormatYVU420Planar = 67, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
174
+ cudaEglColorFormatBayerIspRGGB = 68, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */
175
+ cudaEglColorFormatBayerIspBGGR = 69, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */
176
+ cudaEglColorFormatBayerIspGRBG = 70, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */
177
+ cudaEglColorFormatBayerIspGBRG = 71, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */
178
+ cudaEglColorFormatBayerBCCR = 72, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */
179
+ cudaEglColorFormatBayerRCCB = 73, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */
180
+ cudaEglColorFormatBayerCRBC = 74, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */
181
+ cudaEglColorFormatBayerCBRC = 75, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */
182
+ cudaEglColorFormatBayer10CCCC = 76, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
183
+ cudaEglColorFormatBayer12BCCR = 77, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
184
+ cudaEglColorFormatBayer12RCCB = 78, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
185
+ cudaEglColorFormatBayer12CRBC = 79, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
186
+ cudaEglColorFormatBayer12CBRC = 80, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
187
+ cudaEglColorFormatBayer12CCCC = 81, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
188
+ cudaEglColorFormatY = 82, /**< Color format for single Y plane. */
189
+ cudaEglColorFormatYUV420SemiPlanar_2020 = 83, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
190
+ cudaEglColorFormatYVU420SemiPlanar_2020 = 84, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
191
+ cudaEglColorFormatYUV420Planar_2020 = 85, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
192
+ cudaEglColorFormatYVU420Planar_2020 = 86, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
193
+ cudaEglColorFormatYUV420SemiPlanar_709 = 87, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
194
+ cudaEglColorFormatYVU420SemiPlanar_709 = 88, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
195
+ cudaEglColorFormatYUV420Planar_709 = 89, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
196
+ cudaEglColorFormatYVU420Planar_709 = 90, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
197
+ cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
198
+ cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
199
+ cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
200
+ cudaEglColorFormatY10V10U10_422SemiPlanar = 94, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
201
+ cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
202
+ cudaEglColorFormatY_ER = 96, /**< Extended Range Color format for single Y plane. */
203
+ cudaEglColorFormatY_709_ER = 97, /**< Extended Range Color format for single Y plane. */
204
+ cudaEglColorFormatY10_ER = 98, /**< Extended Range Color format for single Y10 plane. */
205
+ cudaEglColorFormatY10_709_ER = 99, /**< Extended Range Color format for single Y10 plane. */
206
+ cudaEglColorFormatY12_ER = 100, /**< Extended Range Color format for single Y12 plane. */
207
+ cudaEglColorFormatY12_709_ER = 101, /**< Extended Range Color format for single Y12 plane. */
208
+ cudaEglColorFormatYUVA = 102, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */
209
+ cudaEglColorFormatYVYU = 104, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */
210
+ cudaEglColorFormatVYUY = 105, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */
211
+ cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
212
+ cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
213
+ cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
214
+ cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
215
+ cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
216
+ cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
217
+ cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
218
+ cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
219
+ } cudaEglColorFormat;
220
+
221
+ /**
222
+ * CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame
223
+ */
224
+ typedef struct cudaEglPlaneDesc_st {
225
+ unsigned int width; /**< Width of plane */
226
+ unsigned int height; /**< Height of plane */
227
+ unsigned int depth; /**< Depth of plane */
228
+ unsigned int pitch; /**< Pitch of plane */
229
+ unsigned int numChannels; /**< Number of channels for the plane */
230
+ struct cudaChannelFormatDesc channelDesc; /**< Channel Format Descriptor */
231
+ unsigned int reserved[4]; /**< Reserved for future use */
232
+ } cudaEglPlaneDesc;
233
+
234
+ /**
235
+ * CUDA EGLFrame Descriptor - structure defining one frame of EGL.
236
+ *
237
+ * Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.
238
+ * Each plane of EGLFrame is represented by ::cudaEglPlaneDesc which is defined as:
239
+ * \code
240
+ * typedef struct cudaEglPlaneDesc_st {
241
+ * unsigned int width;
242
+ * unsigned int height;
243
+ * unsigned int depth;
244
+ * unsigned int pitch;
245
+ * unsigned int numChannels;
246
+ * struct cudaChannelFormatDesc channelDesc;
247
+ * unsigned int reserved[4];
248
+ * } cudaEglPlaneDesc;
249
+ * \endcode
250
+
251
+ */
252
+ typedef struct cudaEglFrame_st {
253
+ union {
254
+ cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; /**< Array of CUDA arrays corresponding to each plane*/
255
+ struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/
256
+ } frame;
257
+ cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; /**< CUDA EGL Plane Descriptor ::cudaEglPlaneDesc*/
258
+ unsigned int planeCount; /**< Number of planes */
259
+ cudaEglFrameType frameType; /**< Array or Pitch */
260
+ cudaEglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/
261
+ } cudaEglFrame;
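/*
 * Illustrative sketch (not part of the header): describing a single-plane, pitch-linear
 * ARGB frame with the structures above, e.g. before presenting it on a producer
 * connection. The device pointer, pitch and dimensions are placeholders supplied by the
 * caller; requires <string.h> and cuda_runtime.h in addition to this header.
 */
static void fillArgbEglFrame(cudaEglFrame *f, void *devPtr, size_t pitch,
                             unsigned int width, unsigned int height)
{
    memset(f, 0, sizeof(*f));
    f->frameType       = cudaEglFrameTypePitch;
    f->eglColorFormat  = cudaEglColorFormatARGB;
    f->planeCount      = 1;
    f->frame.pPitch[0] = make_cudaPitchedPtr(devPtr, pitch, width, height);
    f->planeDesc[0].width       = width;
    f->planeDesc[0].height      = height;
    f->planeDesc[0].depth       = 1;
    f->planeDesc[0].pitch       = (unsigned int)pitch;
    f->planeDesc[0].numChannels = 4;
    /* 8 bits per channel, unsigned - adjust to match the actual surface format. */
    f->planeDesc[0].channelDesc =
        cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
}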
262
+
263
+ /**
264
+ * CUDA EGLStream Connection
265
+ */
266
+ typedef struct CUeglStreamConnection_st *cudaEglStreamConnection;
267
+
268
+ /** @} */ /* END CUDART_TYPES */
269
+
270
+ /**
271
+ * \addtogroup CUDART_EGL EGL Interoperability
272
+ * This section describes the EGL interoperability functions of the CUDA
273
+ * runtime application programming interface.
274
+ *
275
+ * @{
276
+ */
277
+
278
+ /**
279
+ * \brief Registers an EGL image
280
+ *
281
+ * Registers the EGLImageKHR specified by \p image for access by
282
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
283
+ * Additional Mapping/Unmapping is not required for the registered resource and
284
+ * ::cudaGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource.
285
+ *
286
+ * The application will be responsible for synchronizing access to shared objects.
287
+ * The application must ensure that any pending operations which access the objects have completed
288
+ * before passing control to CUDA. This may be accomplished by issuing and waiting for
289
+ * glFinish command on all GLcontexts (for OpenGL and likewise for other APIs).
290
+ * The application will also be responsible for ensuring that any pending operation on the
291
+ * registered CUDA resource has completed prior to executing subsequent commands in other APIs
292
+ * accessing the same memory objects.
293
+ * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).
294
+ *
295
+ * The surface's intended usage is specified using \p flags, as follows:
296
+ *
297
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
298
+ * resource will be used. It is therefore assumed that this resource will be
299
+ * read from and written to by CUDA. This is the default value.
300
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
301
+ * will not write to this resource.
302
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
303
+ * CUDA will not read from this resource and will write over the
304
+ * entire contents of the resource, so none of the data previously
305
+ * stored in the resource will be preserved.
306
+ *
307
+ * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer.
308
+ * typedef void* EGLImageKHR
309
+ *
310
+ * \param pCudaResource - Pointer to the returned object handle
311
+ * \param image - An EGLImageKHR image which can be used to create target resource.
312
+ * \param flags - Map flags
313
+ *
314
+ * \return
315
+ * ::cudaSuccess,
316
+ * ::cudaErrorInvalidResourceHandle,
317
+ * ::cudaErrorInvalidValue,
318
+ * ::cudaErrorUnknown
319
+ *
320
+ * \sa
321
+ * ::cudaGraphicsUnregisterResource,
322
+ * ::cudaGraphicsResourceGetMappedEglFrame,
323
+ * ::cuGraphicsEGLRegisterImage
324
+ */
325
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsEGLRegisterImage(struct cudaGraphicsResource **pCudaResource, EGLImageKHR image, unsigned int flags);
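/*
 * Illustrative sketch (not part of the header): registering an existing EGLImageKHR and
 * reading back its eglFrame description via cudaGraphicsResourceGetMappedEglFrame
 * (declared further below). The image handle is assumed to have been created through EGL
 * beforehand; no explicit map/unmap step is required for EGL-registered resources.
 */
static cudaError_t inspectEglImage(EGLImageKHR image, cudaEglFrame *frame)
{
    struct cudaGraphicsResource *res = NULL;
    cudaError_t err = cudaGraphicsEGLRegisterImage(&res, image,
                                                   cudaGraphicsRegisterFlagsNone);
    if (err != cudaSuccess) {
        return err;
    }
    err = cudaGraphicsResourceGetMappedEglFrame(frame, res, 0, 0);
    /* ... access frame->frame.pArray[] or frame->frame.pPitch[] here ... */
    cudaGraphicsUnregisterResource(res);
    return err;
}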
326
+
327
+ /**
328
+ * \brief Connect CUDA to EGLStream as a consumer.
329
+ *
330
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream.
331
+ *
332
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
333
+ * API to another.
334
+ *
335
+ * \param conn - Pointer to the returned connection handle
336
+ * \param eglStream - EGLStreamKHR handle
337
+ *
338
+ * \return
339
+ * ::cudaSuccess,
340
+ * ::cudaErrorInvalidValue,
341
+ * ::cudaErrorUnknown
342
+ *
343
+ * \sa
344
+ * ::cudaEGLStreamConsumerDisconnect,
345
+ * ::cudaEGLStreamConsumerAcquireFrame,
346
+ * ::cudaEGLStreamConsumerReleaseFrame,
347
+ * ::cuEGLStreamConsumerConnect
348
+ */
349
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnect(cudaEglStreamConnection *conn, EGLStreamKHR eglStream);
350
+
351
+ /**
352
+ * \brief Connect CUDA to EGLStream as a consumer with given flags.
353
+ *
354
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by
355
+ * ::cudaEglResourceLocationFlags.
356
+ *
357
+ * The flags specify whether the consumer wants to access frames from system memory or video memory.
358
+ * Default is ::cudaEglResourceLocationVidmem.
359
+ *
360
+ * \param conn - Pointer to the returned connection handle
361
+ * \param eglStream - EGLStreamKHR handle
362
+ * \param flags - Flags denote intended location - system or video.
363
+ *
364
+ * \return
365
+ * ::cudaSuccess,
366
+ * ::cudaErrorInvalidValue,
367
+ * ::cudaErrorUnknown
368
+ *
369
+ * \sa
370
+ * ::cudaEGLStreamConsumerDisconnect,
371
+ * ::cudaEGLStreamConsumerAcquireFrame,
372
+ * ::cudaEGLStreamConsumerReleaseFrame,
373
+ * ::cuEGLStreamConsumerConnectWithFlags
374
+ */
375
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection *conn, EGLStreamKHR eglStream, unsigned int flags);
376
+
377
+ /**
378
+ * \brief Disconnect CUDA as a consumer from EGLStream.
379
+ *
380
+ * Disconnect CUDA as a consumer from EGLStreamKHR.
381
+ *
382
+ * \param conn - Connection to disconnect.
383
+ *
384
+ * \return
385
+ * ::cudaSuccess,
386
+ * ::cudaErrorInvalidValue,
387
+ * ::cudaErrorUnknown
388
+ *
389
+ * \sa
390
+ * ::cudaEGLStreamConsumerConnect,
391
+ * ::cudaEGLStreamConsumerAcquireFrame,
392
+ * ::cudaEGLStreamConsumerReleaseFrame,
393
+ * ::cuEGLStreamConsumerDisconnect
394
+ */
395
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection *conn);
396
+
397
+ /**
398
+ * \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
399
+ *
400
+ * Acquire an image frame from EGLStreamKHR.
401
+ * ::cudaGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get
402
+ * ::cudaEglFrame.
403
+ *
404
+ * \param conn - Connection on which to acquire
405
+ * \param pCudaResource - CUDA resource on which the EGLStream frame will be mapped for use.
406
+ * \param pStream - CUDA stream for synchronization and any data migrations
407
+ * implied by ::cudaEglResourceLocationFlags.
408
+ * \param timeout - Desired timeout in usec.
409
+ *
410
+ * \return
411
+ * ::cudaSuccess,
412
+ * ::cudaErrorInvalidValue,
413
+ * ::cudaErrorUnknown,
414
+ * ::cudaErrorLaunchTimeout
415
+ *
416
+ * \sa
417
+ * ::cudaEGLStreamConsumerConnect,
418
+ * ::cudaEGLStreamConsumerDisconnect,
419
+ * ::cudaEGLStreamConsumerReleaseFrame,
420
+ * ::cuEGLStreamConsumerAcquireFrame
421
+ */
422
+
423
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection *conn,
424
+ cudaGraphicsResource_t *pCudaResource, cudaStream_t *pStream, unsigned int timeout);
425
+ /**
426
+ * \brief Releases the last frame acquired from the EGLStream.
427
+ *
428
+ * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
429
+ *
430
+ * \param conn - Connection on which to release
431
+ * \param pCudaResource - CUDA resource whose corresponding frame is to be released
432
+ * \param pStream - CUDA stream on which release will be done.
433
+ *
434
+ * \return
435
+ * ::cudaSuccess,
436
+ * ::cudaErrorInvalidValue,
437
+ * ::cudaErrorUnknown
438
+ *
439
+ * \sa
440
+ * ::cudaEGLStreamConsumerConnect,
441
+ * ::cudaEGLStreamConsumerDisconnect,
442
+ * ::cudaEGLStreamConsumerAcquireFrame,
443
+ * ::cuEGLStreamConsumerReleaseFrame
444
+ */
445
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection *conn,
446
+ cudaGraphicsResource_t pCudaResource, cudaStream_t *pStream);
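/*
 * Illustrative sketch (not part of the header) of the consumer side: connect to an
 * existing EGLStreamKHR, acquire one frame, map it, then release it back to the stream.
 * Error handling is abbreviated and the stream handle is assumed to come from the EGL
 * producer/consumer setup done elsewhere.
 */
static cudaError_t consumeOneFrame(EGLStreamKHR eglStream)
{
    cudaEglStreamConnection conn;
    cudaGraphicsResource_t  resource = NULL;
    cudaStream_t            stream   = 0;     /* default stream */
    cudaEglFrame            frame;

    cudaError_t err = cudaEGLStreamConsumerConnect(&conn, eglStream);
    if (err != cudaSuccess) {
        return err;
    }
    /* Wait up to ~16 ms for the producer to present a frame. */
    err = cudaEGLStreamConsumerAcquireFrame(&conn, &resource, &stream, 16000);
    if (err == cudaSuccess) {
        err = cudaGraphicsResourceGetMappedEglFrame(&frame, resource, 0, 0);
        /* ... launch work on `stream` that reads the frame here ... */
        cudaEGLStreamConsumerReleaseFrame(&conn, resource, &stream);
    }
    cudaEGLStreamConsumerDisconnect(&conn);
    return err;
}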
447
+
448
+ /**
449
+ * \brief Connect CUDA to EGLStream as a producer.
450
+ *
451
+ * Connect CUDA as a producer to EGLStreamKHR specified by \p stream.
452
+ *
453
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
454
+ * API to another.
455
+ *
456
+ * \param conn - Pointer to the returned connection handle
457
+ * \param eglStream - EGLStreamKHR handle
458
+ * \param width - width of the image to be submitted to the stream
459
+ * \param height - height of the image to be submitted to the stream
460
+ *
461
+ * \return
462
+ * ::cudaSuccess,
463
+ * ::cudaErrorInvalidValue,
464
+ * ::cudaErrorUnknown
465
+ *
466
+ * \sa
467
+ * ::cudaEGLStreamProducerDisconnect,
468
+ * ::cudaEGLStreamProducerPresentFrame,
469
+ * ::cudaEGLStreamProducerReturnFrame,
470
+ * ::cuEGLStreamProducerConnect
471
+ */
472
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerConnect(cudaEglStreamConnection *conn,
473
+ EGLStreamKHR eglStream, EGLint width, EGLint height);
474
+
475
+ /**
476
+ * \brief Disconnect CUDA as a producer from EGLStream.
477
+ *
478
+ * Disconnect CUDA as a producer from EGLStreamKHR.
479
+ *
480
+ * \param conn - Connection to disconnect.
481
+ *
482
+ * \return
483
+ * ::cudaSuccess,
484
+ * ::cudaErrorInvalidValue,
485
+ * ::cudaErrorUnknown
486
+ *
487
+ * \sa
488
+ * ::cudaEGLStreamProducerConnect,
489
+ * ::cudaEGLStreamProducerPresentFrame,
490
+ * ::cudaEGLStreamProducerReturnFrame,
491
+ * ::cuEGLStreamProducerDisconnect
492
+ */
493
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerDisconnect(cudaEglStreamConnection *conn);
494
+
495
+ /**
496
+ * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
497
+ *
498
+ * The ::cudaEglFrame is defined as:
499
+ * \code
500
+ * typedef struct cudaEglFrame_st {
501
+ * union {
502
+ * cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
503
+ * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
504
+ * } frame;
505
+ * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
506
+ * unsigned int planeCount;
507
+ * cudaEglFrameType frameType;
508
+ * cudaEglColorFormat eglColorFormat;
509
+ * } cudaEglFrame;
510
+ * \endcode
511
+ *
512
+ * For ::cudaEglFrame of type ::cudaEglFrameTypePitch, the application may present a sub-region of a memory
513
+ * allocation. In that case, ::cudaPitchedPtr::ptr will specify the start address of the sub-region in
514
+ * the allocation and ::cudaEglPlaneDesc will specify the dimensions of the sub-region.
515
+ *
516
+ * \param conn - Connection on which to present the CUDA array
517
+ * \param eglframe - CUDA EGLStream Producer Frame handle to be sent to the consumer over EGLStream.
518
+ * \param pStream - CUDA stream on which to present the frame.
519
+ *
520
+ * \return
521
+ * ::cudaSuccess,
522
+ * ::cudaErrorInvalidValue,
523
+ * ::cudaErrorUnknown
524
+ *
525
+ * \sa
526
+ * ::cudaEGLStreamProducerConnect,
527
+ * ::cudaEGLStreamProducerDisconnect,
528
+ * ::cudaEGLStreamProducerReturnFrame,
529
+ * ::cuEGLStreamProducerPresentFrame
530
+ */
531
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection *conn,
532
+ cudaEglFrame eglframe, cudaStream_t *pStream);
533
+
534
+ /**
535
+ * \brief Return the CUDA eglFrame to the EGLStream last released by the consumer.
536
+ *
537
+ * This API can potentially return cudaErrorLaunchTimeout if the consumer has not
538
+ * returned a frame to EGL stream. If timeout is returned the application can retry.
539
+ *
540
+ * \param conn - Connection on which to present the CUDA array
541
+ * \param eglframe - CUDA EGLStream Producer Frame handle returned from the consumer over EGLStream.
542
+ * \param pStream - CUDA stream on which to return the frame.
543
+ *
544
+ * \return
545
+ * ::cudaSuccess,
546
+ * ::cudaErrorLaunchTimeout,
547
+ * ::cudaErrorInvalidValue,
548
+ * ::cudaErrorUnknown
549
+ *
550
+ * \sa
551
+ * ::cudaEGLStreamProducerConnect,
552
+ * ::cudaEGLStreamProducerDisconnect,
553
+ * ::cudaEGLStreamProducerPresentFrame,
554
+ * ::cuEGLStreamProducerReturnFrame
555
+ */
556
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection *conn,
557
+ cudaEglFrame *eglframe, cudaStream_t *pStream);
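/*
 * Illustrative sketch (not part of the header) of the producer side: connect with the
 * frame dimensions, present a frame previously described in a cudaEglFrame (for example
 * with a helper like the fillArgbEglFrame sketch above), then recycle the frame once the
 * consumer returns it. cudaErrorLaunchTimeout from the return call simply means no frame
 * has come back yet and the call can be retried.
 */
static cudaError_t produceOneFrame(EGLStreamKHR eglStream, cudaEglFrame frame,
                                   EGLint width, EGLint height)
{
    cudaEglStreamConnection conn;
    cudaStream_t            stream = 0;
    cudaEglFrame            returned;

    cudaError_t err = cudaEGLStreamProducerConnect(&conn, eglStream, width, height);
    if (err != cudaSuccess) {
        return err;
    }
    err = cudaEGLStreamProducerPresentFrame(&conn, frame, &stream);
    if (err == cudaSuccess) {
        err = cudaEGLStreamProducerReturnFrame(&conn, &returned, &stream);
    }
    cudaEGLStreamProducerDisconnect(&conn);
    return err;
}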
558
+
559
+ /**
560
+ * \brief Get an eglFrame through which to access a registered EGL graphics resource.
561
+ *
562
+ * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
563
+ * \p resource may be accessed.
564
+ * This API can only be called for EGL graphics resources.
565
+ *
566
+ * The ::cudaEglFrame is defined as
567
+ * \code
568
+ * typedef struct cudaEglFrame_st {
569
+ * union {
570
+ * cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
571
+ * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
572
+ * } frame;
573
+ * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
574
+ * unsigned int planeCount;
575
+ * cudaEglFrameType frameType;
576
+ * cudaEglColorFormat eglColorFormat;
577
+ * } cudaEglFrame;
578
+ * \endcode
579
+ *
580
+ *
581
+ * \param eglFrame - Returned eglFrame.
582
+ * \param resource - Registered resource to access.
583
+ * \param index - Index for cubemap surfaces.
584
+ * \param mipLevel - Mipmap level for the subresource to access.
585
+ *
586
+ * \return
587
+ * ::cudaSuccess,
588
+ * ::cudaErrorInvalidValue,
589
+ * ::cudaErrorUnknown
590
+ *
591
+ * \note Note that in case of multiplanar \p *eglFrame, pitch of only first plane (unsigned int cudaEglPlaneDesc::pitch) is to be considered by the application.
592
+ *
593
+ * \sa
594
+ * ::cudaGraphicsSubResourceGetMappedArray,
595
+ * ::cudaGraphicsResourceGetMappedPointer,
596
+ * ::cuGraphicsResourceGetMappedEglFrame
597
+ */
598
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame,
599
+ cudaGraphicsResource_t resource, unsigned int index, unsigned int mipLevel);
600
+
601
+ /**
602
+ * \brief Creates an event from EGLSync object
603
+ *
604
+ * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
605
+ * via \p flags. Valid flags include:
606
+ * - ::cudaEventDefault: Default event creation flag.
607
+ * - ::cudaEventBlockingSync: Specifies that the created event should use blocking
608
+ * synchronization. A CPU thread that uses ::cudaEventSynchronize() to wait on
609
+ * an event created with this flag will block until the event has actually
610
+ * been completed.
611
+ *
612
+ * ::cudaEventRecord and TimingData are not supported for events created from EGLSync.
613
+ *
614
+ * The EGLSyncKHR is an opaque handle to an EGL sync object.
615
+ * typedef void* EGLSyncKHR
616
+ *
617
+ * \param phEvent - Returns newly created event
618
+ * \param eglSync - Opaque handle to EGLSync object
619
+ * \param flags - Event creation flags
620
+ *
621
+ * \return
622
+ * ::cudaSuccess,
623
+ * ::cudaErrorInitializationError,
624
+ * ::cudaErrorInvalidValue,
625
+ * ::cudaErrorLaunchFailure,
626
+ * ::cudaErrorMemoryAllocation
627
+ *
628
+ * \sa
629
+ * ::cudaEventQuery,
630
+ * ::cudaEventSynchronize,
631
+ * ::cudaEventDestroy
632
+ */
633
+ extern __host__ cudaError_t CUDARTAPI cudaEventCreateFromEGLSync(cudaEvent_t *phEvent, EGLSyncKHR eglSync, unsigned int flags);
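/*
 * Illustrative sketch (not part of the header): wrapping an existing EGLSyncKHR in a CUDA
 * event and blocking the calling thread until it signals. The eglSync handle is assumed
 * to come from eglCreateSyncKHR elsewhere; recording or timing such an event is not
 * supported, per the description above.
 */
static cudaError_t waitForEglSync(EGLSyncKHR eglSync)
{
    cudaEvent_t ev;
    cudaError_t err = cudaEventCreateFromEGLSync(&ev, eglSync, cudaEventDefault);
    if (err != cudaSuccess) {
        return err;
    }
    err = cudaEventSynchronize(ev);   /* blocks until the EGL sync object fires */
    cudaEventDestroy(ev);
    return err;
}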
634
+
635
+ /** @} */ /* END CUDART_EGL */
636
+
637
+ #if defined(__cplusplus)
638
+ }
639
+ #endif /* __cplusplus */
640
+
641
+ #endif /* __CUDA_EGL_INTEROP_H__ */
642
+
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h ADDED
@@ -0,0 +1,367 @@
1
+ /*
2
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef __CUDA_FP8_H__
51
+ #define __CUDA_FP8_H__
52
+
53
+ /* Set up function decorations */
54
+ #if defined(__CUDACC__)
55
+ #define __CUDA_FP8_DECL__ static __device__ __inline__
56
+ #define __CUDA_HOSTDEVICE_FP8__ __host__ __device__
57
+ #define __CUDA_HOSTDEVICE_FP8_DECL__ static __host__ __device__ __inline__
58
+ #else /* !defined(__CUDACC__) */
59
+ #if defined(__GNUC__)
60
+ #define __CUDA_HOSTDEVICE_FP8_DECL__ static __attribute__((unused))
61
+ #else
62
+ #define __CUDA_HOSTDEVICE_FP8_DECL__ static
63
+ #endif /* defined(__GNUC__) */
64
+ #define __CUDA_HOSTDEVICE_FP8__
65
+ #endif /* defined(__CUDACC__) */
66
+
67
+ #if !defined(_MSC_VER) && __cplusplus >= 201103L
68
+ #define __CPP_VERSION_AT_LEAST_11_FP8
69
+ #elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
70
+ #define __CPP_VERSION_AT_LEAST_11_FP8
71
+ #endif
72
+
73
+ /* bring in __half_raw data type */
74
+ #include "cuda_fp16.h"
75
+ /* bring in __nv_bfloat16_raw data type */
76
+ #include "cuda_bf16.h"
77
+ /* bring in float2, double4, etc vector types */
78
+ #include "vector_types.h"
79
+
80
+ /**
81
+ * \defgroup CUDA_MATH_INTRINSIC_FP8 FP8 Intrinsics
82
+ * This section describes fp8 intrinsic functions.
83
+ * To use these functions, include the header file \p cuda_fp8.h in your
84
+ * program.
85
+ * The following macros are available to help users selectively enable/disable
86
+ * various definitions present in the header file:
87
+ * - \p __CUDA_NO_FP8_CONVERSIONS__ - If defined, this macro will prevent any
88
+ * use of the C++ type conversions (converting constructors and conversion
89
+ * operators) defined in the header.
90
+ * - \p __CUDA_NO_FP8_CONVERSION_OPERATORS__ - If defined, this macro will
91
+ * prevent any use of the C++ conversion operators from \p fp8 to other types.
92
+ */
93
+
94
+ /**
95
+ * \defgroup CUDA_MATH_FP8_MISC FP8 Conversion and Data Movement
96
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
97
+ * To use these functions, include the header file \p cuda_fp8.h in your
98
+ * program.
99
+ */
100
+
101
+ /**
102
+ * \ingroup CUDA_MATH_FP8_MISC
103
+ * \brief 8-bit \p unsigned \p integer
104
+ * type abstraction used for \p fp8 floating-point
105
+ * numbers storage.
106
+ */
107
+ typedef unsigned char __nv_fp8_storage_t;
108
+
109
+ /**
110
+ * \ingroup CUDA_MATH_FP8_MISC
111
+ * \brief 16-bit \p unsigned \p integer
112
+ * type abstraction used for storage of pairs of
113
+ * \p fp8 floating-point numbers.
114
+ */
115
+ typedef unsigned short int __nv_fp8x2_storage_t;
116
+
117
+ /**
118
+ * \ingroup CUDA_MATH_FP8_MISC
119
+ * \brief 32-bit \p unsigned \p integer
120
+ * type abstraction used for storage of tetrads of
121
+ * \p fp8 floating-point numbers.
122
+ */
123
+ typedef unsigned int __nv_fp8x4_storage_t;
124
+
125
+ /**
126
+ * \ingroup CUDA_MATH_FP8_MISC
127
+ * \brief Enumerates the modes applicable when
128
+ * performing a narrowing conversion to \p fp8 destination types.
129
+ */
130
+ typedef enum __nv_saturation_t {
131
+ /**
132
+ * Means no saturation to finite is performed when conversion
133
+ * results in rounding values outside the range of destination
134
+ * type.
135
+ * NOTE: for fp8 type of e4m3 kind, the results that are larger
136
+ * than the maximum representable finite number of the target
137
+ * format become NaN.
138
+ */
139
+ __NV_NOSAT,
140
+ /**
141
+ * Means input larger than the maximum representable
142
+ * finite number MAXNORM of the target format round to the
143
+ * MAXNORM of the same sign as input.
144
+ */
145
+ __NV_SATFINITE,
146
+ } __nv_saturation_t;
147
+
148
+ /**
149
+ * \ingroup CUDA_MATH_FP8_MISC
150
+ * \brief Enumerates the possible
151
+ * interpretations of the 8-bit values when referring to them as
152
+ * \p fp8 types.
153
+ */
154
+ typedef enum __nv_fp8_interpretation_t {
155
+ __NV_E4M3, /**< Stands for \p fp8 numbers of \p e4m3 kind. */
156
+ __NV_E5M2, /**< Stands for \p fp8 numbers of \p e5m2 kind. */
157
+ } __nv_fp8_interpretation_t;
158
+
159
+ /* Forward-declaration of C-style APIs */
160
+
161
+ /**
162
+ * \ingroup CUDA_MATH_FP8_MISC
163
+ * \brief Converts input \p double precision \p x to \p fp8 type of the
164
+ * requested kind using round-to-nearest-even rounding and requested saturation
165
+ * mode.
166
+ *
167
+ * \details Converts input \p x to \p fp8 type of the kind specified by
168
+ * \p fp8_interpretation parameter,
169
+ * using round-to-nearest-even rounding and
170
+ * saturation mode specified by \p saturate parameter.
171
+ *
172
+ * \returns
173
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
174
+ */
175
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
176
+ __nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
177
+ const __nv_fp8_interpretation_t fp8_interpretation);
178
+
179
+ /**
180
+ * \ingroup CUDA_MATH_FP8_MISC
181
+ * \brief Converts input vector of two \p double precision numbers packed
182
+ * in \p double2 \p x into a vector of two values of \p fp8 type of
183
+ * the requested kind using round-to-nearest-even rounding and requested
184
+ * saturation mode.
185
+ *
186
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
187
+ * kind specified by \p fp8_interpretation parameter, using
188
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
189
+ * parameter.
190
+ *
191
+ * \returns
192
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
193
+ */
194
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
195
+ __nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate,
196
+ const __nv_fp8_interpretation_t fp8_interpretation);
197
+
198
+ /**
199
+ * \ingroup CUDA_MATH_FP8_MISC
200
+ * \brief Converts input \p single precision \p x to \p fp8 type of the
201
+ * requested kind using round-to-nearest-even rounding and requested saturation
202
+ * mode.
203
+ *
204
+ * \details Converts input \p x to \p fp8 type of the kind specified by
205
+ * \p fp8_interpretation parameter,
206
+ * using round-to-nearest-even rounding and
207
+ * saturation mode specified by \p saturate parameter.
208
+ *
209
+ * \returns
210
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
211
+ */
212
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
213
+ __nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate,
214
+ const __nv_fp8_interpretation_t fp8_interpretation);
215
+
216
+ /**
217
+ * \ingroup CUDA_MATH_FP8_MISC
218
+ * \brief Converts input vector of two \p single precision numbers packed
219
+ * in \p float2 \p x into a vector of two values of \p fp8 type of
220
+ * the requested kind using round-to-nearest-even rounding and requested
221
+ * saturation mode.
222
+ *
223
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
224
+ * kind specified by \p fp8_interpretation parameter, using
225
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
226
+ * parameter.
227
+ *
228
+ * \returns
229
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
230
+ */
231
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
232
+ __nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate,
233
+ const __nv_fp8_interpretation_t fp8_interpretation);
234
+
235
+ /**
236
+ * \ingroup CUDA_MATH_FP8_MISC
237
+ * \brief Converts input \p half precision \p x to \p fp8 type of the requested
238
+ * kind using round-to-nearest-even rounding and requested saturation mode.
239
+ *
240
+ * \details Converts input \p x to \p fp8 type of the kind specified by
241
+ * \p fp8_interpretation parameter,
242
+ * using round-to-nearest-even rounding and
243
+ * saturation mode specified by \p saturate parameter.
244
+ *
245
+ * \returns
246
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
247
+ */
248
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
249
+ __nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate,
250
+ const __nv_fp8_interpretation_t fp8_interpretation);
251
+
252
+ /**
253
+ * \ingroup CUDA_MATH_FP8_MISC
254
+ * \brief Converts input vector of two \p half precision numbers packed
255
+ * in \p __half2_raw \p x into a vector of two values of \p fp8 type of
256
+ * the requested kind using round-to-nearest-even rounding and requested
257
+ * saturation mode.
258
+ *
259
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
260
+ * kind specified by \p fp8_interpretation parameter, using
261
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
262
+ * parameter.
263
+ *
264
+ * \returns
265
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
266
+ */
267
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2(
268
+ const __half2_raw x, const __nv_saturation_t saturate,
269
+ const __nv_fp8_interpretation_t fp8_interpretation);
270
+
271
+ /**
272
+ * \ingroup CUDA_MATH_FP8_MISC
273
+ * \brief Converts input \p nv_bfloat16 precision \p x to \p fp8 type of the
274
+ * requested kind using round-to-nearest-even rounding and requested saturation
275
+ * mode.
276
+ *
277
+ * \details Converts input \p x to \p fp8 type of the kind specified by
278
+ * \p fp8_interpretation parameter,
279
+ * using round-to-nearest-even rounding and
280
+ * saturation mode specified by \p saturate parameter.
281
+ *
282
+ * \returns
283
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
284
+ */
285
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8(
286
+ const __nv_bfloat16_raw x, const __nv_saturation_t saturate,
287
+ const __nv_fp8_interpretation_t fp8_interpretation);
288
+
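/* Editorial sketch (illustrative helper, not part of the original header):
 * the bfloat16 variant takes the raw storage type, so an __nv_bfloat16 value
 * is passed through static_cast, as the C++ wrappers in cuda_fp8.hpp do.
 * Assumes cuda_fp8.h (which pulls in the bfloat16 types) has been included. */
static __nv_fp8_storage_t encode_bf16_as_e5m2(const __nv_bfloat16 v) {
    const __nv_bfloat16_raw raw = static_cast<__nv_bfloat16_raw>(v);
    return __nv_cvt_bfloat16raw_to_fp8(raw, __NV_SATFINITE, __NV_E5M2);
}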
289
+ /**
290
+ * \ingroup CUDA_MATH_FP8_MISC
291
+ * \brief Converts input vector of two \p nv_bfloat16 precision numbers packed
292
+ * in \p __nv_bfloat162_raw \p x into a vector of two values of \p fp8 type of
293
+ * the requested kind using round-to-nearest-even rounding and requested
294
+ * saturation mode.
295
+ *
296
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
297
+ * kind specified by \p fp8_interpretation parameter, using
298
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
299
+ * parameter.
300
+ *
301
+ * \returns
302
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
303
+ */
304
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
305
+ __nv_cvt_bfloat16raw2_to_fp8x2(
306
+ const __nv_bfloat162_raw x, const __nv_saturation_t saturate,
307
+ const __nv_fp8_interpretation_t fp8_interpretation);
308
+
309
+ /**
310
+ * \ingroup CUDA_MATH_FP8_MISC
311
+ * \brief Converts input \p fp8 \p x of the specified kind
312
+ * to \p half precision.
313
+ *
314
+ * \details Converts input \p x of \p fp8 type of the kind specified by
315
+ * \p fp8_interpretation parameter
316
+ * to \p half precision.
317
+ *
318
+ * \returns
319
+ * - The \p __half_raw value holds the result of conversion.
320
+ */
321
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half_raw
322
+ __nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x,
323
+ const __nv_fp8_interpretation_t fp8_interpretation);
324
+ /**
325
+ * \ingroup CUDA_MATH_FP8_MISC
326
+ * \brief Converts input vector of two \p fp8 values of the specified kind
327
+ * to a vector of two \p half precision values packed in \p __half2_raw
328
+ * structure.
329
+ *
330
+ * \details Converts input vector \p x of \p fp8 type of the kind specified by
331
+ * \p fp8_interpretation parameter
332
+ * to a vector of two \p half precision values and returns as \p __half2_raw
333
+ * structure.
334
+ *
335
+ * \returns
336
+ * - The \p __half2_raw value holds the result of conversion.
337
+ */
338
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
339
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
340
+ const __nv_fp8_interpretation_t fp8_interpretation);
341
+
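/* Editorial round-trip sketch (illustrative name, not part of the original
 * header): encode a float to e4m3 and decode it back through __half. Assumes
 * cuda_fp8.h and cuda_fp16.h are included so __half2float is available. */
static float roundtrip_e4m3(const float v) {
    const __nv_fp8_storage_t enc =
        __nv_cvt_float_to_fp8(v, __NV_SATFINITE, __NV_E4M3);
    const __half_raw dec = __nv_cvt_fp8_to_halfraw(enc, __NV_E4M3);
    return __half2float(__half(dec)); /* widen to float for inspection */
}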
342
+ #if defined(__cplusplus)
343
+
344
+ #define __CUDA_FP8_TYPES_EXIST__
345
+
346
+ /* Forward-declaration of structures defined in "cuda_fp8.hpp" */
347
+ struct __nv_fp8_e5m2;
348
+ struct __nv_fp8x2_e5m2;
349
+ struct __nv_fp8x4_e5m2;
350
+
351
+ struct __nv_fp8_e4m3;
352
+ struct __nv_fp8x2_e4m3;
353
+ struct __nv_fp8x4_e4m3;
354
+
355
+ #endif /* defined(__cplusplus) */
356
+
357
+ #include "cuda_fp8.hpp"
358
+
359
+ #undef __CUDA_FP8_DECL__
360
+ #undef __CUDA_HOSTDEVICE_FP8__
361
+ #undef __CUDA_HOSTDEVICE_FP8_DECL__
362
+
363
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
364
+ #undef __CPP_VERSION_AT_LEAST_11_FP8
365
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
366
+
367
+ #endif /* end of include guard: __CUDA_FP8_H__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp ADDED
@@ -0,0 +1,1546 @@
1
+ /*
2
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_FP8_HPP__)
51
+ #define __CUDA_FP8_HPP__
52
+
53
+ #if !defined(__CUDA_FP8_H__)
54
+ #error "Do not include this file directly. Instead, include cuda_fp8.h."
55
+ #endif
56
+
57
+ /* C++ header for std::memcpy (used for type punning in host-side
58
+ * implementations). When compiling as a CUDA source file memcpy is provided
59
+ * implicitly. !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
60
+ */
61
+ #if defined(__cplusplus) && !defined(__CUDACC__)
62
+ #include <cstring>
63
+ #elif !defined(__cplusplus) && !defined(__CUDACC__)
64
+ #include <string.h>
65
+ #endif /* defined(__cplusplus) && !defined(__CUDACC__) */
66
+
67
+ /* Set up structure-alignment attribute */
68
+ #if !(defined __CUDA_ALIGN__)
69
+ #if defined(__CUDACC__)
70
+ #define __CUDA_ALIGN__(align) __align__(align)
71
+ #else
72
+ /* Define alignment macro based on compiler type (cannot assume C11 "_Alignas"
73
+ * is available) */
74
+ #if __cplusplus >= 201103L
75
+ #define __CUDA_ALIGN__(n) \
76
+ alignas(n) /* C++11 kindly gives us a keyword for this */
77
+ #else /* !defined(__CPP_VERSION_AT_LEAST_11_FP8)*/
78
+ #if defined(__GNUC__)
79
+ #define __CUDA_ALIGN__(n) __attribute__((aligned(n)))
80
+ #elif defined(_MSC_VER)
81
+ #define __CUDA_ALIGN__(n) __declspec(align(n))
82
+ #else
83
+ #define __CUDA_ALIGN__(n)
84
+ #endif /* defined(__GNUC__) */
85
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
86
+ #endif /* defined(__CUDACC__) */
87
+ #endif /* !(defined __CUDA_ALIGN__) */
88
+
89
+ #if !(defined __CPP_VERSION_AT_LEAST_11_FP8)
90
+ /* need c++11 for explicit operators */
91
+ #define __CUDA_NO_FP8_CONVERSION_OPERATORS__
92
+ #endif
93
+
94
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
95
+ __nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
96
+ const __nv_fp8_interpretation_t fp8_interpretation) {
97
+ unsigned char res;
98
+ unsigned long long int xbits;
99
+
100
+ #if defined(__CUDACC__) || (!defined __cplusplus)
101
+ (void)memcpy(&xbits, &x, sizeof(x));
102
+ #else
103
+ (void)std::memcpy(&xbits, &x, sizeof(x));
104
+ #endif
105
+ unsigned char FP8_MAXNORM;
106
+ unsigned char FP8_MANTISSA_MASK;
107
+ unsigned short int FP8_EXP_BIAS;
108
+ unsigned long long int FP8_SIGNIFICAND_BITS;
109
+ const unsigned long long int DP_INF_BITS = 0x7FF0000000000000ULL;
110
+ unsigned long long int FP8_MINDENORM_O2;
111
+ unsigned long long int FP8_OVERFLOW_THRESHOLD;
112
+ unsigned long long int FP8_MINNORM;
113
+
114
+ if (fp8_interpretation == __NV_E4M3) {
115
+ FP8_EXP_BIAS = 7U;
116
+ FP8_SIGNIFICAND_BITS = 4ULL;
117
+ FP8_MANTISSA_MASK = 0x7U;
118
+ FP8_MINDENORM_O2 = 0x3F50000000000000ULL; // mindenorm/2 = 2^-10
119
+ FP8_OVERFLOW_THRESHOLD =
120
+ 0x407D000000000000ULL; // maxnorm + 1/2ulp = 0x1.Cp+8 + 0x1p+4
121
+ FP8_MAXNORM = 0x7EU;
122
+ FP8_MINNORM = 0x3F90000000000000ULL; // minnorm = 2^-6
123
+ } else { //__NV_E5M2
124
+ FP8_EXP_BIAS = 15U;
125
+ FP8_SIGNIFICAND_BITS = 3ULL;
126
+ FP8_MANTISSA_MASK = 0x3U;
127
+ FP8_MINDENORM_O2 = 0x3EE0000000000000ULL; // mindenorm/2 = 2^-17
128
+ FP8_OVERFLOW_THRESHOLD =
129
+ 0x40EE000000000000ULL -
130
+ 1ULL; // maxnorm + 1/2ulp = 0x1.Ep+15, and -1 to have common code
131
+ FP8_MAXNORM = 0x7BU;
132
+ FP8_MINNORM = 0x3F10000000000000ULL; // minnorm = 2^-14
133
+ }
134
+
135
+ // 1/2 LSB of the target format, positioned in double precision mantissa
136
+ // helpful in midpoints detection during round-to-nearest-even step
137
+ const unsigned long long int FP8_DP_HALF_ULP =
138
+ (unsigned long long int)1ULL << (53ULL - FP8_SIGNIFICAND_BITS - 1ULL);
139
+ // prepare sign bit in target format
140
+ unsigned char sign = (unsigned char)((xbits >> 63ULL) << 7U);
141
+ // prepare exponent field in target format
142
+ unsigned char exp =
143
+ (unsigned char)((((unsigned short int)(xbits >> 52ULL)) & 0x7FFU) -
144
+ 1023U + FP8_EXP_BIAS);
145
+ // round mantissa to target format width, rounding towards zero
146
+ unsigned char mantissa =
147
+ (unsigned char)(xbits >> (53ULL - FP8_SIGNIFICAND_BITS)) &
148
+ FP8_MANTISSA_MASK;
149
+ unsigned long long int absx = xbits & 0x7FFFFFFFFFFFFFFFULL;
150
+
151
+ if (absx <= FP8_MINDENORM_O2) {
152
+ // zero or underflow
153
+ res = 0U;
154
+ } else if (absx > DP_INF_BITS) {
155
+ // NaN
156
+ if (fp8_interpretation == __NV_E4M3) {
157
+ res = 0x7FU;
158
+ } else {
159
+ // NaN --> QNaN
160
+ res = 0x7EU | mantissa;
161
+ }
162
+ } else if (absx > FP8_OVERFLOW_THRESHOLD) {
163
+ if (saturate == __NV_SATFINITE) {
164
+ res = FP8_MAXNORM;
165
+ } else {
166
+ // __NV_NOSAT
167
+ if (fp8_interpretation == __NV_E4M3) {
168
+ // no Inf in E4M3
169
+ res = 0x7FU; // NaN
170
+ } else {
171
+ res = 0x7CU; // Inf in E5M2
172
+ }
173
+ }
174
+ } else if (absx >= FP8_MINNORM) {
175
+ res = (unsigned char)((exp << (FP8_SIGNIFICAND_BITS - 1U)) | mantissa);
176
+ // rounded-off bits
177
+ unsigned long long int round =
178
+ xbits & ((FP8_DP_HALF_ULP << 1ULL) - 1ULL);
179
+ // round-to-nearest-even adjustment
180
+ if ((round > FP8_DP_HALF_ULP) ||
181
+ ((round == FP8_DP_HALF_ULP) && (mantissa & 1U))) {
182
+ res = (unsigned char)(res + 1U);
183
+ }
184
+ } else // Denormal range
185
+ {
186
+ unsigned char shift = (unsigned char)(1U - exp);
187
+ // add implicit leading bit
188
+ mantissa |= (unsigned char)(1U << (FP8_SIGNIFICAND_BITS - 1U));
189
+ // additional round-off due to denormalization
190
+ res = (unsigned char)(mantissa >> shift);
191
+
192
+ // rounded-off bits, including implicit leading bit
193
+ unsigned long long int round =
194
+ (xbits | ((unsigned long long int)1ULL << (53ULL - 1ULL))) &
195
+ ((FP8_DP_HALF_ULP << (shift + 1ULL)) - 1ULL);
196
+ // round-to-nearest-even adjustment
197
+ if ((round > (FP8_DP_HALF_ULP << shift)) ||
198
+ ((round == (FP8_DP_HALF_ULP << shift)) && (res & 1U))) {
199
+ res = (unsigned char)(res + 1U);
200
+ }
201
+ }
202
+
203
+ res |= sign;
204
+
205
+ return (__nv_fp8_storage_t)res;
206
+ }
207
+
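/* Editorial worked example (the values follow from the parameters selected
 * above; the spot-check helper is illustrative and not part of the original
 * header):
 *   e4m3 (bias 7, 3 mantissa bits):  1.0 -> 0x38, 448.0 (maxnorm) -> 0x7E
 *   e5m2 (bias 15, 2 mantissa bits): 1.0 -> 0x3C, -2.0            -> 0xC0 */
static int fp8_encode_spot_check(void) {
    return (__nv_cvt_double_to_fp8(1.0, __NV_SATFINITE, __NV_E4M3) == 0x38U) &&
           (__nv_cvt_double_to_fp8(1.0, __NV_SATFINITE, __NV_E5M2) == 0x3CU);
}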
208
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
209
+ __nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate,
210
+ const __nv_fp8_interpretation_t fp8_interpretation) {
211
+ __nv_fp8x2_storage_t storage = (__nv_fp8x2_storage_t)__nv_cvt_double_to_fp8(
212
+ x.y, saturate, fp8_interpretation);
213
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
214
+ storage = (__nv_fp8x2_storage_t)(storage |
215
+ __nv_cvt_double_to_fp8(
216
+ x.x, saturate, fp8_interpretation));
217
+ return storage;
218
+ }
219
+
220
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
221
+ __nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate,
222
+ const __nv_fp8_interpretation_t fp8_interpretation) {
223
+ __nv_fp8_storage_t res = 0U;
224
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
225
+ if (saturate == __NV_SATFINITE) {
226
+ __nv_fp8x2_storage_t storage;
227
+ if (fp8_interpretation == __NV_E5M2) {
228
+ asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
229
+ : "=h"(storage)
230
+ : "f"(x), "f"(0.0f));
231
+ } else {
232
+ asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
233
+ : "=h"(storage)
234
+ : "f"(x), "f"(0.0f));
235
+ }
236
+ res = (__nv_fp8_storage_t)storage;
237
+ } else
238
+ #endif
239
+ {
240
+ unsigned int xbits;
241
+ #if defined(__CUDACC__) || (!defined __cplusplus)
242
+ (void)memcpy(&xbits, &x, sizeof(x));
243
+ #else
244
+ (void)std::memcpy(&xbits, &x, sizeof(x));
245
+ #endif
246
+
247
+ // isnan
248
+ if ((xbits & 0x7FFFFFFFU) > 0x7F800000U) {
249
+ // Canonical NaN
250
+ xbits = 0x7FFFFFFFU;
251
+ }
252
+
253
+ float fx;
254
+ #if defined(__CUDACC__) || (!defined __cplusplus)
255
+ (void)memcpy(&fx, &xbits, sizeof(xbits));
256
+ #else
257
+ (void)std::memcpy(&fx, &xbits, sizeof(xbits));
258
+ #endif
259
+
260
+ const double dx = (double)fx;
261
+ res = __nv_cvt_double_to_fp8(dx, saturate, fp8_interpretation);
262
+ }
263
+ return res;
264
+ }
265
+
266
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
267
+ __nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate,
268
+ const __nv_fp8_interpretation_t fp8_interpretation) {
269
+ __nv_fp8x2_storage_t storage;
270
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
271
+ if (saturate == __NV_SATFINITE) {
272
+ if (fp8_interpretation == __NV_E5M2) {
273
+ asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
274
+ : "=h"(storage)
275
+ : "f"(x.x), "f"(x.y));
276
+ } else {
277
+ asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
278
+ : "=h"(storage)
279
+ : "f"(x.x), "f"(x.y));
280
+ }
281
+ } else
282
+ #endif
283
+ {
284
+ storage = (__nv_fp8x2_storage_t)__nv_cvt_float_to_fp8(
285
+ x.y, saturate, fp8_interpretation);
286
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
287
+ storage = (__nv_fp8x2_storage_t)(storage | __nv_cvt_float_to_fp8(
288
+ x.x, saturate,
289
+ fp8_interpretation));
290
+ }
291
+ return storage;
292
+ }
293
+
294
+ __CUDA_HOSTDEVICE_FP8_DECL__ float
295
+ __internal_halfraw_to_float(const __half_raw x) {
296
+ float f;
297
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
298
+ asm("{cvt.f32.f16 %0, %1;}\n" : "=f"(f) : "h"(x.x));
299
+ #else
300
+ const unsigned int ux = (unsigned int)x.x;
301
+ unsigned int sign = (ux >> 15U) & 1U;
302
+ unsigned int exponent = (ux >> 10U) & 0x1fU;
303
+ unsigned int mantissa = (ux & 0x3ffU) << 13U;
304
+ if (exponent == 0x1fU) { /* NaN or Inf */
305
+ /* discard sign of a NaN */
306
+ sign = ((mantissa != 0U) ? (sign >> 1U) : sign);
307
+ mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U);
308
+ exponent = 0xffU;
309
+ } else if (exponent == 0U) { /* Denorm or Zero */
310
+ if (mantissa != 0U) {
311
+ unsigned int msb;
312
+ exponent = 0x71U;
313
+ do {
314
+ msb = (mantissa & 0x400000U);
315
+ mantissa <<= 1U; /* normalize */
316
+ --exponent;
317
+ } while (msb == 0U);
318
+ mantissa &= 0x7fffffU; /* 1.mantissa is implicit */
319
+ }
320
+ } else {
321
+ exponent += 0x70U;
322
+ }
323
+ const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa);
324
+ #if defined(__CUDACC__) || (!defined __cplusplus)
325
+ (void)memcpy(&f, &u, sizeof(u));
326
+ #else
327
+ (void)std::memcpy(&f, &u, sizeof(u));
328
+ #endif
329
+ #endif /* (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) */
330
+ return f;
331
+ }
332
+
333
+ __CUDA_HOSTDEVICE_FP8_DECL__ float2
334
+ __internal_halfraw2_to_float2(const __half2_raw x) {
335
+ __half_raw raw;
336
+ float2 res;
337
+ raw.x = x.x;
338
+ res.x = __internal_halfraw_to_float(raw);
339
+ raw.x = x.y;
340
+ res.y = __internal_halfraw_to_float(raw);
341
+ return res;
342
+ }
343
+
344
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
345
+ __nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate,
346
+ const __nv_fp8_interpretation_t fp8_interpretation) {
347
+ __nv_fp8_storage_t res = 0U;
348
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
349
+ if (saturate == __NV_SATFINITE) {
350
+ unsigned int half2_storage = (unsigned int)(x.x);
351
+ __nv_fp8x2_storage_t tmp;
352
+ if (fp8_interpretation == __NV_E5M2) {
353
+ asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
354
+ : "=h"(tmp)
355
+ : "r"(half2_storage));
356
+ } else {
357
+ asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
358
+ : "=h"(tmp)
359
+ : "r"(half2_storage));
360
+ }
361
+ res = (__nv_fp8_storage_t)tmp;
362
+ } else
363
+ #endif
364
+ {
365
+ float fx = __internal_halfraw_to_float(x);
366
+ res = __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
367
+ }
368
+ return res;
369
+ }
370
+
371
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2(
372
+ const __half2_raw x, const __nv_saturation_t saturate,
373
+ const __nv_fp8_interpretation_t fp8_interpretation) {
374
+ __nv_fp8x2_storage_t tmp;
375
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
376
+ if (saturate == __NV_SATFINITE) {
377
+ unsigned int half2_storage;
378
+ (void)memcpy(&half2_storage, &x, sizeof(x));
379
+
380
+ if (fp8_interpretation == __NV_E5M2) {
381
+ asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
382
+ : "=h"(tmp)
383
+ : "r"(half2_storage));
384
+ } else {
385
+ asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
386
+ : "=h"(tmp)
387
+ : "r"(half2_storage));
388
+ }
389
+ } else
390
+ #endif
391
+ {
392
+ __half_raw raw;
393
+ raw.x = x.x;
394
+ __nv_fp8_storage_t lo =
395
+ __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation);
396
+ raw.x = x.y;
397
+ __nv_fp8_storage_t hi =
398
+ __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation);
399
+ tmp = hi;
400
+ tmp = (__nv_fp8x2_storage_t)(tmp << 8U);
401
+ tmp = (__nv_fp8x2_storage_t)(tmp | lo);
402
+ }
403
+ return tmp;
404
+ }
405
+
406
+ __CUDA_HOSTDEVICE_FP8_DECL__ float
407
+ __internal_bf16raw_to_float(const __nv_bfloat16_raw x) {
408
+ const unsigned int ux = ((unsigned int)x.x) << 16U;
409
+ float fx;
410
+ #if defined(__CUDACC__) || (!defined __cplusplus)
411
+ (void)memcpy(&fx, &ux, sizeof(ux));
412
+ #else
413
+ (void)std::memcpy(&fx, &ux, sizeof(ux));
414
+ #endif
415
+ return fx;
416
+ }
417
+
418
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_bfloat16_raw
419
+ __internal_float_to_bf16raw_rz(const float x) {
420
+ unsigned int ux;
421
+ __nv_bfloat16_raw r;
422
+ #if defined(__CUDACC__) || (!defined __cplusplus)
423
+ (void)memcpy(&ux, &x, sizeof(x));
424
+ #else
425
+ (void)std::memcpy(&ux, &x, sizeof(x));
426
+ #endif
427
+ r.x = (unsigned short int)(ux >> 16U);
428
+ return r;
429
+ }
430
+
431
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8(
432
+ const __nv_bfloat16_raw x, const __nv_saturation_t saturate,
433
+ const __nv_fp8_interpretation_t fp8_interpretation) {
434
+ const float fx = __internal_bf16raw_to_float(x);
435
+ const __nv_fp8_storage_t res =
436
+ __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
437
+ return res;
438
+ }
439
+
440
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
441
+ __nv_cvt_bfloat16raw2_to_fp8x2(
442
+ const __nv_bfloat162_raw x, const __nv_saturation_t saturate,
443
+ const __nv_fp8_interpretation_t fp8_interpretation) {
444
+ __nv_bfloat16_raw raw;
445
+ raw.x = x.y;
446
+ __nv_fp8x2_storage_t storage =
447
+ (__nv_fp8x2_storage_t)__nv_cvt_bfloat16raw_to_fp8(raw, saturate,
448
+ fp8_interpretation);
449
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
450
+ raw.x = x.x;
451
+ storage = (__nv_fp8x2_storage_t)(storage |
452
+ __nv_cvt_bfloat16raw_to_fp8(
453
+ raw, saturate, fp8_interpretation));
454
+ return storage;
455
+ }
456
+
457
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
458
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
459
+ const __nv_fp8_interpretation_t fp8_interpretation);
460
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half_raw
461
+ __nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x,
462
+ const __nv_fp8_interpretation_t fp8_interpretation) {
463
+ __half_raw res;
464
+ res.x = 0U;
465
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
466
+ res.x =
467
+ __nv_cvt_fp8x2_to_halfraw2((__nv_fp8x2_storage_t)x, fp8_interpretation)
468
+ .x;
469
+ #else
470
+ unsigned short int ur = (unsigned short int)x;
471
+ ur = (unsigned short int)(ur << 8U);
472
+
473
+ if (fp8_interpretation == __NV_E5M2) {
474
+ if ((ur & 0x7FFFU) > 0x7C00U) {
475
+ /* If NaN, return canonical NaN */
476
+ ur = 0x7FFFU;
477
+ }
478
+ } else { // __NV_E4M3
479
+ unsigned short int sign = ur & 0x8000U;
480
+ unsigned short int exponent =
481
+ (unsigned short int)(((ur & 0x7800U) >> 1U) + 0x2000U);
482
+ unsigned short int mantissa = (ur & 0x0700U) >> 1U;
483
+ unsigned char absx = 0x7FU & (unsigned char)x;
484
+
485
+ if (absx == 0x7FU) // NaN
486
+ {
487
+ ur = 0x7FFFU; // fp16 canonical NaN, discard sign
488
+ } else if (exponent == 0x2000U) {
489
+ // zero or denormal
490
+ if (mantissa != 0U) {
491
+ // normalize
492
+ mantissa = (unsigned short int)(mantissa << 1U);
493
+ while ((mantissa & 0x0400U) == 0U) {
494
+ mantissa = (unsigned short int)(mantissa << 1U);
495
+ exponent = (unsigned short int)(exponent - 0x0400U);
496
+ }
497
+ // discard implicit leading bit
498
+ mantissa &= 0x03FFU;
499
+ } else { // Zero
500
+ exponent = 0U;
501
+ }
502
+
503
+ ur = (sign | exponent) | mantissa;
504
+ } else {
505
+ ur = (sign | exponent) | mantissa;
506
+ }
507
+ }
508
+ res.x = ur;
509
+ #endif
510
+ return res;
511
+ }
512
+
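/* Editorial spot-check of the scalar decode path above (illustrative helper,
 * not part of the original header): e4m3 0x38 encodes 1.0, and the fallback
 * branch rebiases it to the half-precision pattern 0x3C00. */
static int fp8_decode_spot_check(void) {
    const __half_raw one =
        __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)0x38U, __NV_E4M3);
    return (one.x == 0x3C00U) ? 1 : 0;
}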
513
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
514
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
515
+ const __nv_fp8_interpretation_t fp8_interpretation) {
516
+ __half2_raw res;
517
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
518
+ unsigned int half2_storage;
519
+ if (fp8_interpretation == __NV_E5M2) {
520
+ asm("{cvt.rn.f16x2.e5m2x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x));
521
+ } else {
522
+ asm("{cvt.rn.f16x2.e4m3x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x));
523
+ }
524
+ (void)memcpy(&res, &half2_storage, sizeof(half2_storage));
525
+ #else
526
+ res.x =
527
+ __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)x, fp8_interpretation).x;
528
+ res.y = __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)(x >> 8U),
529
+ fp8_interpretation)
530
+ .x;
531
+ #endif
532
+ return res;
533
+ }
534
+
535
+ /* All other definitions in this file are only visible to C++ compilers */
536
+ #if defined(__cplusplus)
537
+
538
+ /**
539
+ * \defgroup CUDA_MATH_FP8_E5M2_STRUCT C++ struct for handling fp8 data type of e5m2 kind.
540
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
541
+ */
542
+
543
+ /**
544
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
545
+ * \brief __nv_fp8_e5m2 datatype
546
+ *
547
+ * \details This structure implements the datatype for handling
548
+ * \p fp8 floating-point numbers of \p e5m2 kind:
549
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
550
+ *
551
+ * The structure implements converting constructors and operators.
552
+ */
553
+ struct __CUDA_ALIGN__(1) __nv_fp8_e5m2 {
554
+ public:
555
+ /**
556
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
557
+ * Storage variable contains the \p fp8 floating-point data.
558
+ */
559
+ __nv_fp8_storage_t __x;
560
+
561
+ /**
562
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
563
+ * Constructor by default.
564
+ */
565
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
566
+ __nv_fp8_e5m2() = default;
567
+ #else
568
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2() {}
569
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
570
+
571
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
572
+
573
+ /* Construct from wider FP types */
574
+ /* Note we do avoid constructor init-list because of special host/device
575
+ * compilation rules */
576
+
577
+ /**
578
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
579
+ * Constructor from \p __half data type, relies on \p __NV_SATFINITE
580
+ * behavior for out-of-range values.
581
+ */
582
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __half f) {
583
+ __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
584
+ __NV_SATFINITE, __NV_E5M2);
585
+ }
586
+ /**
587
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
588
+ * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
589
+ * behavior for out-of-range values.
590
+ */
591
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __nv_bfloat16 f) {
592
+ __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
593
+ __NV_SATFINITE, __NV_E5M2);
594
+ }
595
+ /**
596
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
597
+ * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
598
+ * for out-of-range values.
599
+ */
600
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const float f) {
601
+ __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
602
+ }
603
+ /**
604
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
605
+ * Constructor from \p double data type, relies on \p __NV_SATFINITE
606
+ * behavior for out-of-range values.
607
+ */
608
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const double f) {
609
+ __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
610
+ }
611
+
612
+ /* Converts from integral */
613
+
614
+ /**
615
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
616
+ * Constructor from \p unsigned \p short \p int data type, relies on \p
617
+ * __NV_SATFINITE behavior for out-of-range values.
618
+ */
619
+ explicit __CUDA_HOSTDEVICE_FP8__
620
+ __nv_fp8_e5m2(const unsigned short int val) {
621
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
622
+ }
623
+ /**
624
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
625
+ * Constructor from \p unsigned \p int data type, relies on \p
626
+ * __NV_SATFINITE behavior for out-of-range values.
627
+ */
628
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const unsigned int val) {
629
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
630
+ }
631
+ /**
632
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
633
+ * Constructor from \p unsigned \p long \p long \p int data type, relies on
634
+ * \p __NV_SATFINITE behavior for out-of-range values.
635
+ */
636
+ explicit __CUDA_HOSTDEVICE_FP8__
637
+ __nv_fp8_e5m2(const unsigned long long int val) {
638
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
639
+ }
640
+
641
+ /**
642
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
643
+ * Constructor from \p short \p int data type.
644
+ */
645
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const short int val) {
646
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
647
+ }
648
+ /**
649
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
650
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
651
+ * for out-of-range values.
652
+ */
653
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const int val) {
654
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
655
+ }
656
+ /**
657
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
658
+ * Constructor from \p long \p long \p int data type, relies on \p
659
+ * __NV_SATFINITE behavior for out-of-range values.
660
+ */
661
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const long long int val) {
662
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
663
+ }
664
+
665
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
666
+ /* Widening FP converts */
667
+ /**
668
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
669
+ * Conversion operator to \p __half data type.
670
+ */
671
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
672
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
673
+ }
674
+ /**
675
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
676
+ * Conversion operator to \p float data type.
677
+ */
678
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
679
+ return __internal_halfraw_to_float(
680
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
681
+ }
682
+ /**
683
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
684
+ * Conversion operator to \p __nv_bfloat16 data type.
685
+ */
686
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
687
+ return static_cast<__nv_bfloat16>(
688
+ __internal_float_to_bf16raw_rz(float(*this)));
689
+ }
690
+ /**
691
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
692
+ * Conversion operator to \p double data type.
693
+ */
694
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
695
+ return static_cast<double>(float(*this));
696
+ }
697
+
698
+ /* Convert to integral */
699
+
700
+ /**
701
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
702
+ * Conversion operator to \p unsigned \p char data type.
703
+ * Clamps negative and too large inputs to the output range.
704
+ * \p NaN inputs convert to \p zero.
705
+ */
706
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
707
+ unsigned char i;
708
+ const float f = float(*this);
709
+ const unsigned char max_val = 0xFFU;
710
+ const unsigned char min_val = 0U;
711
+ const unsigned char bits = (*this).__x;
712
+ // saturation fixup
713
+ if ((bits & 0x7FU) > 0x7CU) {
714
+ // NaN
715
+ i = 0;
716
+ } else if (f > static_cast<float>(max_val)) {
717
+ // saturate maximum
718
+ i = max_val;
719
+ } else if (f < static_cast<float>(min_val)) {
720
+ // saturate minimum
721
+ i = min_val;
722
+ } else {
723
+ // normal value
724
+ i = static_cast<unsigned char>(f);
725
+ }
726
+ return i;
727
+ }
728
+ /**
729
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
730
+ * Conversion operator to \p unsigned \p short \p int data type.
731
+ * Clamps negative and too large inputs to the output range.
732
+ * \p NaN inputs convert to \p zero.
733
+ */
734
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
735
+ return __half2ushort_rz(__half(*this));
736
+ }
737
+ /**
738
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
739
+ * Conversion operator to \p unsigned \p int data type.
740
+ * Clamps negative and too large inputs to the output range.
741
+ * \p NaN inputs convert to \p zero.
742
+ */
743
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
744
+ return __half2uint_rz(__half(*this));
745
+ }
746
+ /**
747
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
748
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
749
+ * Clamps negative and too large inputs to the output range.
750
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
751
+ */
752
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
753
+ return __half2ull_rz(__half(*this));
754
+ }
755
+
756
+ /**
757
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
758
+ * Conversion operator to \p signed \p char data type.
759
+ * Clamps too large inputs to the output range.
760
+ * \p NaN inputs convert to \p zero.
761
+ */
762
+ explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
763
+ signed char i;
764
+ const float f = float(*this);
765
+ const signed char max_val = (signed char)0x7FU;
766
+ const signed char min_val = (signed char)0x80U;
767
+ const unsigned char bits = (*this).__x;
768
+ // saturation fixup
769
+ if ((bits & 0x7FU) > 0x7CU) {
770
+ // NaN
771
+ i = 0;
772
+ } else if (f > static_cast<float>(max_val)) {
773
+ // saturate maximum
774
+ i = max_val;
775
+ } else if (f < static_cast<float>(min_val)) {
776
+ // saturate minimum
777
+ i = min_val;
778
+ } else {
779
+ // normal value
780
+ i = static_cast<signed char>(f);
781
+ }
782
+ return i;
783
+ }
784
+ /**
785
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
786
+ * Conversion operator to \p short \p int data type.
787
+ * Clamps too large inputs to the output range.
788
+ * \p NaN inputs convert to \p zero.
789
+ */
790
+ explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
791
+ return __half2short_rz(__half(*this));
792
+ }
793
+ /**
794
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
795
+ * Conversion operator to \p int data type.
796
+ * Clamps too large inputs to the output range.
797
+ * \p NaN inputs convert to \p zero.
798
+ */
799
+ explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
800
+ return __half2int_rz(__half(*this));
801
+ }
802
+ /**
803
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
804
+ * Conversion operator to \p long \p long \p int data type.
805
+ * Clamps too large inputs to the output range.
806
+ * \p NaN inputs convert to \p 0x8000000000000000LL.
807
+ */
808
+ explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
809
+ return __half2ll_rz(__half(*this));
810
+ }
811
+
812
+ /**
813
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
814
+ * Conversion operator to \p bool data type.
815
+ * +0 and -0 inputs convert to \p false.
816
+ * Non-zero inputs convert to \p true.
817
+ */
818
+ explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
819
+ return (__x & 0x7FU) != 0U;
820
+ }
821
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
822
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
823
+ };
824
+
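/* Editorial CUDA kernel sketch (illustrative names, not part of the original
 * header): construct __nv_fp8_e5m2 from float and widen it back. Assumes the
 * file is compiled as CUDA C++ with the conversion operators enabled, i.e.
 * __CUDA_NO_FP8_CONVERSION_OPERATORS__ is not defined. */
#if defined(__CUDACC__)
__global__ void e5m2_demo(const float *in, float *out, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        const __nv_fp8_e5m2 q(in[i]);   /* saturating encode to e5m2 */
        out[i] = static_cast<float>(q); /* widen back through __half */
    }
}
#endif /* defined(__CUDACC__) */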
825
+ /**
826
+ * \defgroup CUDA_MATH_FP8X2_E5M2_STRUCT C++ struct for handling vector type of two fp8 values of e5m2 kind.
827
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
828
+ */
829
+
830
+ /**
831
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
832
+ * \brief __nv_fp8x2_e5m2 datatype
833
+ *
834
+ * \details This structure implements the datatype for handling two
835
+ * \p fp8 floating-point numbers of \p e5m2 kind each:
836
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
837
+ *
838
+ * The structure implements converting constructors and operators.
839
+ */
840
+ struct __CUDA_ALIGN__(2) __nv_fp8x2_e5m2 {
841
+ public:
842
+ /**
843
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
844
+ * Storage variable contains the vector of two \p fp8 floating-point data
845
+ * values.
846
+ */
847
+ __nv_fp8x2_storage_t __x;
848
+
849
+ /**
850
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
851
+ * Constructor by default.
852
+ */
853
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
854
+ __nv_fp8x2_e5m2() = default;
855
+ #else
856
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2() {}
857
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
858
+
859
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
860
+
861
+ /* Construct from wider types */
862
+
863
+ /**
864
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
865
+ * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
866
+ * behavior for out-of-range values.
867
+ */
868
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __half2 f) {
869
+ __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
870
+ __NV_SATFINITE, __NV_E5M2);
871
+ }
872
+ /**
873
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
874
+ * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
875
+ * behavior for out-of-range values.
876
+ */
877
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __nv_bfloat162 f) {
878
+ __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
879
+ __NV_SATFINITE, __NV_E5M2);
880
+ }
881
+ /**
882
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
883
+ * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
884
+ * behavior for out-of-range values.
885
+ */
886
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const float2 f) {
887
+ __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
888
+ }
889
+ /**
890
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
891
+ * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
892
+ * behavior for out-of-range values.
893
+ */
894
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const double2 f) {
895
+ __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
896
+ }
897
+
898
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
899
+ /* Widening converts */
900
+ /**
901
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
902
+ * Conversion operator to \p __half2 data type.
903
+ */
904
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
905
+ return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
906
+ }
907
+ /**
908
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
909
+ * Conversion operator to \p float2 data type.
910
+ */
911
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
912
+ return __internal_halfraw2_to_float2(
913
+ __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
914
+ }
915
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
916
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
917
+ };
918
+
919
+ __CUDA_HOSTDEVICE_FP8_DECL__ unsigned int
920
+ __internal_pack_u16x2_to_u32(const unsigned short int src_lo,
921
+ const unsigned short int src_hi) {
922
+ unsigned int dst;
923
+ #if (defined __CUDACC__) && (defined __CUDA_ARCH__)
924
+ asm("{ mov.b32 %0, {%1,%2};}\n" : "=r"(dst) : "h"(src_lo), "h"(src_hi));
925
+ #else
926
+ dst = (static_cast<unsigned int>(src_hi) << 16U) |
927
+ static_cast<unsigned int>(src_lo);
928
+ #endif
929
+ return dst;
930
+ }
931
+
932
+ /**
933
+ * \defgroup CUDA_MATH_FP8X4_E5M2_STRUCT C++ struct for handling vector type of four fp8 values of e5m2 kind.
934
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
935
+ */
936
+
937
+ /**
938
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
939
+ * \brief __nv_fp8x4_e5m2 datatype
940
+ *
941
+ * \details This structure implements the datatype for handling four
942
+ * \p fp8 floating-point numbers of \p e5m2 kind each:
943
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
944
+ *
945
+ * The structure implements converting constructors and operators.
946
+ */
947
+ struct __CUDA_ALIGN__(4) __nv_fp8x4_e5m2 {
948
+ public:
949
+ /**
950
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
951
+ * Storage variable contains the vector of four \p fp8 floating-point data
952
+ * values.
953
+ */
954
+ __nv_fp8x4_storage_t __x;
955
+
956
+ /**
957
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
958
+ * Constructor by default.
959
+ */
960
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
961
+ __nv_fp8x4_e5m2() = default;
962
+ #else
963
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2() {}
964
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
965
+
966
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
967
+
968
+ /* Construct from wider types */
969
+
970
+ /**
971
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
972
+ * Constructor from a pair of \p __half2 data type values,
973
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
974
+ */
975
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __half2 flo,
976
+ const __half2 fhi) {
977
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
978
+ static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E5M2);
979
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
980
+ static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
981
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
982
+ }
983
+ /**
984
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
985
+ * Constructor from a pair of \p __nv_bfloat162 data type values,
986
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
987
+ */
988
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __nv_bfloat162 flo,
989
+ const __nv_bfloat162 fhi) {
990
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
991
+ static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E5M2);
992
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
993
+ static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
994
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
995
+ }
996
+ /**
997
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
998
+ * Constructor from \p float4 vector data type,
999
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1000
+ */
1001
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const float4 f) {
1002
+ const float2 flo = {f.x, f.y};
1003
+ const float2 fhi = {f.z, f.w};
1004
+ const __nv_fp8x2_storage_t rlo =
1005
+ __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
1006
+ const __nv_fp8x2_storage_t rhi =
1007
+ __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
1008
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1009
+ }
1010
+ /**
1011
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
1012
+ * Constructor from \p double4 vector data type,
1013
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1014
+ */
1015
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const double4 f) {
1016
+ const double2 flo = {f.x, f.y};
1017
+ const double2 fhi = {f.z, f.w};
1018
+ const __nv_fp8x2_storage_t rlo =
1019
+ __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
1020
+ const __nv_fp8x2_storage_t rhi =
1021
+ __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
1022
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1023
+ }
1024
+
1025
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1026
+ /* Widening converts */
1027
+
1028
+ /**
1029
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
1030
+ * Conversion operator to \p float4 vector data type.
1031
+ */
1032
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
1033
+ const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
1034
+ const __nv_fp8x2_storage_t shi =
1035
+ static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
1036
+ float2 rlo = __internal_halfraw2_to_float2(
1037
+ __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E5M2));
1038
+ float2 rhi = __internal_halfraw2_to_float2(
1039
+ __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E5M2));
1040
+ float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
1041
+ return res;
1042
+ }
1043
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1044
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1045
+ };
1046
+
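/* Editorial CUDA kernel sketch (illustrative names, not part of the original
 * header): pack a float4 into one 32-bit __nv_fp8x4_e5m2 word and unpack it
 * again. Assumes CUDA C++ with the conversion operators enabled. */
#if defined(__CUDACC__)
__global__ void e5m2x4_demo(const float4 *in, float4 *out, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        const __nv_fp8x4_e5m2 q(in[i]);  /* four saturating encodes in one word */
        out[i] = static_cast<float4>(q); /* widen all four lanes back to float */
    }
}
#endif /* defined(__CUDACC__) */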
1047
+ /**
1048
+ * \defgroup CUDA_MATH_FP8_E4M3_STRUCT C++ struct for handling fp8 data type of e4m3 kind.
1049
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1050
+ */
1051
+
1052
+ /**
1053
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1054
+ * \brief __nv_fp8_e4m3 datatype
1055
+ *
1056
+ * \details This structure implements the datatype for storing
1057
+ * \p fp8 floating-point numbers of \p e4m3 kind:
1058
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1059
+ * The encoding doesn't support Infinity.
1060
+ * NaNs are limited to 0x7F and 0xFF values.
1061
+ *
1062
+ * The structure implements converting constructors and operators.
1063
+ */
1064
+ struct __CUDA_ALIGN__(1) __nv_fp8_e4m3 {
1065
+ public:
1066
+ /**
1067
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1068
+ * Storage variable contains the \p fp8 floating-point data.
1069
+ */
1070
+ __nv_fp8_storage_t __x;
1071
+
1072
+ /**
1073
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1074
+ * Constructor by default.
1075
+ */
1076
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1077
+ __nv_fp8_e4m3() = default;
1078
+ #else
1079
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3() {}
1080
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1081
+
1082
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1083
+
1084
+ /* Construct from wider FP types */
1085
+ /* Note we do avoid constructor init-list because of special host/device
1086
+ * compilation rules */
1087
+
1088
+ /**
1089
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1090
+ * Constructor from \p __half data type, relies on \p __NV_SATFINITE
1091
+ * behavior for out-of-range values.
1092
+ */
1093
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __half f) {
1094
+ __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
1095
+ __NV_SATFINITE, __NV_E4M3);
1096
+ }
1097
+ /**
1098
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1099
+ * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
1100
+ * behavior for out-of-range values.
1101
+ */
1102
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __nv_bfloat16 f) {
1103
+ __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
1104
+ __NV_SATFINITE, __NV_E4M3);
1105
+ }
1106
+ /**
1107
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1108
+ * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
1109
+ * for out-of-range values.
1110
+ */
1111
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const float f) {
1112
+ __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
1113
+ }
1114
+ /**
1115
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1116
+ * Constructor from \p double data type, relies on \p __NV_SATFINITE
1117
+ * behavior for out-of-range values.
1118
+ */
1119
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const double f) {
1120
+ __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
1121
+ }
1122
+
1123
+ /* Converts from integral */
1124
+
1125
+ /**
1126
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1127
+ * Constructor from \p unsigned \p short \p int data type, relies on \p
1128
+ * __NV_SATFINITE behavior for out-of-range values.
1129
+ */
1130
+ explicit __CUDA_HOSTDEVICE_FP8__
1131
+ __nv_fp8_e4m3(const unsigned short int val) {
1132
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1133
+ }
1134
+ /**
1135
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1136
+ * Constructor from \p unsigned \p int data type, relies on \p
1137
+ * __NV_SATFINITE behavior for out-of-range values.
1138
+ */
1139
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const unsigned int val) {
1140
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1141
+ }
1142
+ /**
1143
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1144
+ * Constructor from \p unsigned \p long \p long \p int data type, relies on
1145
+ * \p __NV_SATFINITE behavior for out-of-range values.
1146
+ */
1147
+ explicit __CUDA_HOSTDEVICE_FP8__
1148
+ __nv_fp8_e4m3(const unsigned long long int val) {
1149
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1150
+ }
1151
+
1152
+ /**
1153
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1154
+ * Constructor from \p short \p int data type, relies on \p
1155
+ * __NV_SATFINITE behavior for out-of-range values.
1156
+ */
1157
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const short int val) {
1158
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1159
+ }
1160
+ /**
1161
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1162
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
1163
+ * for out-of-range values.
1164
+ */
1165
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const int val) {
1166
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1167
+ }
1168
+ /**
1169
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1170
+ * Constructor from \p long \p long \p int data type, relies on \p
1171
+ * __NV_SATFINITE behavior for out-of-range values.
1172
+ */
1173
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const long long int val) {
1174
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1175
+ }
1176
+
1177
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1178
+ /* Widening FP converts */
1179
+ /**
1180
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1181
+ * Conversion operator to \p __half data type.
1182
+ */
1183
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
1184
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
1185
+ }
1186
+ /**
1187
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1188
+ * Conversion operator to \p float data type.
1189
+ */
1190
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
1191
+ return __internal_halfraw_to_float(
1192
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
1193
+ }
1194
+ /**
1195
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1196
+ * Conversion operator to \p __nv_bfloat16 data type.
1197
+ */
1198
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
1199
+ return static_cast<__nv_bfloat16>(
1200
+ __internal_float_to_bf16raw_rz(float(*this)));
1201
+ }
1202
+ /**
1203
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1204
+ * Conversion operator to \p double data type.
1205
+ */
1206
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
1207
+ return static_cast<double>(float(*this));
1208
+ }
1209
+
1210
+ /* Convert to integral */
1211
+
1212
+ /**
1213
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1214
+ * Conversion operator to \p unsigned \p char data type.
1215
+ * Clamps negative and too large inputs to the output range.
1216
+ * \p NaN inputs convert to \p zero.
1217
+ */
1218
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
1219
+ unsigned char i;
1220
+ const float f = float(*this);
1221
+ const unsigned char max_val = 0xFFU;
1222
+ const unsigned char min_val = 0U;
1223
+ const unsigned char bits = (*this).__x;
1224
+ // saturation fixup
1225
+ if ((bits & 0x7FU) == 0x7FU) {
1226
+ // NaN
1227
+ i = 0;
1228
+ } else if (f > static_cast<float>(max_val)) {
1229
+ // saturate maximum
1230
+ i = max_val;
1231
+ } else if (f < static_cast<float>(min_val)) {
1232
+ // saturate minimum
1233
+ i = min_val;
1234
+ } else {
1235
+ // normal value
1236
+ i = static_cast<unsigned char>(f);
1237
+ }
1238
+ return i;
1239
+ }
1240
+
1241
+ /**
1242
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1243
+ * Conversion operator to \p unsigned \p short \p int data type.
1244
+ * Clamps negative inputs to zero.
1245
+ * \p NaN inputs convert to \p zero.
1246
+ */
1247
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
1248
+ return __half2ushort_rz(__half(*this));
1249
+ }
1250
+ /**
1251
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1252
+ * Conversion operator to \p unsigned \p int data type.
1253
+ * Clamps negative inputs to zero.
1254
+ * \p NaN inputs convert to \p zero.
1255
+ */
1256
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
1257
+ return __half2uint_rz(__half(*this));
1258
+ }
1259
+ /**
1260
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1261
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
1262
+ * Clamps negative inputs to zero.
1263
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
1264
+ */
1265
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
1266
+ return __half2ull_rz(__half(*this));
1267
+ }
1268
+
1269
+ /**
1270
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1271
+ * Conversion operator to \p signed \p char data type.
1272
+ * Clamps too large inputs to the output range.
1273
+ * \p NaN inputs convert to \p zero.
1274
+ */
1275
+ explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
1276
+ signed char i;
1277
+ const float f = float(*this);
1278
+ const signed char max_val = (signed char)0x7FU;
1279
+ const signed char min_val = (signed char)0x80U;
1280
+ const unsigned char bits = (*this).__x;
1281
+ // saturation fixup
1282
+ if ((bits & 0x7FU) == 0x7FU) {
1283
+ // NaN
1284
+ i = 0;
1285
+ } else if (f > static_cast<float>(max_val)) {
1286
+ // saturate maximum
1287
+ i = max_val;
1288
+ } else if (f < static_cast<float>(min_val)) {
1289
+ // saturate minimum
1290
+ i = min_val;
1291
+ } else {
1292
+ // normal value
1293
+ i = static_cast<signed char>(f);
1294
+ }
1295
+ return i;
1296
+ }
1297
+ /**
1298
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1299
+ * Conversion operator to \p short \p int data type.
1300
+ * \p NaN inputs convert to \p zero.
1301
+ */
1302
+ explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
1303
+ return __half2short_rz(__half(*this));
1304
+ }
1305
+ /**
1306
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1307
+ * Conversion operator to \p int data type.
1308
+ * \p NaN inputs convert to \p zero.
1309
+ */
1310
+ explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
1311
+ return __half2int_rz(__half(*this));
1312
+ }
1313
+ /**
1314
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1315
+ * Conversion operator to \p long \p long \p int data type.
1316
+ * \p NaN inputs convert to \p 0x8000000000000000LL.
1317
+ */
1318
+ explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
1319
+ return __half2ll_rz(__half(*this));
1320
+ }
1321
+
1322
+ /**
1323
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1324
+ * Conversion operator to \p bool data type.
1325
+ * +0 and -0 inputs convert to \p false.
1326
+ * Non-zero inputs convert to \p true.
1327
+ */
1328
+ explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
1329
+ return (__x & 0x7FU) != 0U;
1330
+ }
1331
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1332
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1333
+ };
1334
+
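/* Editorial host-side sketch (illustrative name, not part of the original
 * header): because e4m3 has no Inf encoding, __NV_SATFINITE clamps
 * out-of-range inputs to the maxnorm, 448. Assumes the conversion operators
 * are enabled. */
static float e4m3_saturation_example(void) {
    const __nv_fp8_e4m3 big(1.0e6f); /* saturates to 0x7E, i.e. 448 */
    return static_cast<float>(big);  /* == 448.0f */
}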
1335
+ /**
1336
+ * \defgroup CUDA_MATH_FP8X2_E4M3_STRUCT C++ struct for handling vector type of two fp8 values of e4m3 kind.
1337
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1338
+ */
1339
+
1340
+ /**
1341
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1342
+ * \brief __nv_fp8x2_e4m3 datatype
1343
+ *
1344
+ * \details This structure implements the datatype for storage
1345
+ * and operations on the vector of two \p fp8 values of \p e4m3 kind each:
1346
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1347
+ * The encoding doesn't support Infinity.
1348
+ * NaNs are limited to 0x7F and 0xFF values.
1349
+ */
1350
+ struct __CUDA_ALIGN__(2) __nv_fp8x2_e4m3 {
1351
+ public:
1352
+ /**
1353
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1354
+ * Storage variable contains the vector of two \p fp8 floating-point data
1355
+ * values.
1356
+ */
1357
+ __nv_fp8x2_storage_t __x;
1358
+
1359
+ /**
1360
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1361
+ * Constructor by default.
1362
+ */
1363
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1364
+ __nv_fp8x2_e4m3() = default;
1365
+ #else
1366
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3() {}
1367
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1368
+
1369
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1370
+
1371
+ /* Construct from wider types */
1372
+
1373
+ /**
1374
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1375
+ * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
1376
+ * behavior for out-of-range values.
1377
+ */
1378
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __half2 f) {
1379
+ __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
1380
+ __NV_SATFINITE, __NV_E4M3);
1381
+ }
1382
+ /**
1383
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1384
+ * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
1385
+ * behavior for out-of-range values.
1386
+ */
1387
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __nv_bfloat162 f) {
1388
+ __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
1389
+ __NV_SATFINITE, __NV_E4M3);
1390
+ }
1391
+ /**
1392
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1393
+ * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
1394
+ * behavior for out-of-range values.
1395
+ */
1396
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const float2 f) {
1397
+ __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
1398
+ }
1399
+ /**
1400
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1401
+ * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
1402
+ * behavior for out-of-range values.
1403
+ */
1404
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const double2 f) {
1405
+ __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
1406
+ }
1407
+
1408
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1409
+ /* Widening converts */
1410
+ /**
1411
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1412
+ * Conversion operator to \p __half2 data type.
1413
+ */
1414
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
1415
+ return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
1416
+ }
1417
+ /**
1418
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1419
+ * Conversion operator to \p float2 data type.
1420
+ */
1421
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
1422
+ return __internal_halfraw2_to_float2(
1423
+ __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
1424
+ }
1425
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1426
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1427
+ };
1428
+
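A minimal round-trip sketch for the paired type above (illustrative only, same CUDA 11.8+ assumptions as the previous sketch):

#include <cuda_fp8.h>

// Hypothetical demo kernel: packs a float2 into __nv_fp8x2_e4m3 and widens it back.
__global__ void fp8x2_e4m3_roundtrip_demo(float2 *out)
{
    const float2 in = {1.5f, -0.25f};              // both values exactly representable
    const __nv_fp8x2_e4m3 packed(in);              // two saturating e4m3 conversions
    *out = static_cast<float2>(packed);            // widened back via __half2
}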
1429
+ /**
1430
+ * \defgroup CUDA_MATH_FP8X4_E4M3_STRUCT C++ struct for handling vector type of four fp8 values of e4m3 kind.
1431
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1432
+ */
1433
+
1434
+ /**
1435
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1436
+ * \brief __nv_fp8x4_e4m3 datatype
1437
+ *
1438
+ * \details This structure implements the datatype for storage
1439
+ * and operations on the vector of four \p fp8 values of \p e4m3 kind each:
1440
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1441
+ * The encoding doesn't support Infinity.
1442
+ * NaNs are limited to 0x7F and 0xFF values.
1443
+ */
1444
+ struct __CUDA_ALIGN__(4) __nv_fp8x4_e4m3 {
1445
+ public:
1446
+ /**
1447
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1448
+ * Storage variable contains the vector of four \p fp8 floating-point data
1449
+ * values.
1450
+ */
1451
+ __nv_fp8x4_storage_t __x;
1452
+
1453
+ /**
1454
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1455
+ * Constructor by default.
1456
+ */
1457
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1458
+ __nv_fp8x4_e4m3() = default;
1459
+ #else
1460
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3() {}
1461
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1462
+
1463
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1464
+
1465
+ /* Construct from wider types */
1466
+
1467
+ /**
1468
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1469
+ * Constructor from a pair of \p __half2 data type values,
1470
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1471
+ */
1472
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __half2 flo,
1473
+ const __half2 fhi) {
1474
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
1475
+ static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E4M3);
1476
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
1477
+ static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
1478
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1479
+ }
1480
+ /**
1481
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1482
+ * Constructor from a pair of \p __nv_bfloat162 data type values,
1483
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1484
+ */
1485
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __nv_bfloat162 flo,
1486
+ const __nv_bfloat162 fhi) {
1487
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
1488
+ static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E4M3);
1489
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
1490
+ static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
1491
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1492
+ }
1493
+ /**
1494
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1495
+ * Constructor from \p float4 vector data type,
1496
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1497
+ */
1498
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const float4 f) {
1499
+ const float2 flo = {f.x, f.y};
1500
+ const float2 fhi = {f.z, f.w};
1501
+ const __nv_fp8x2_storage_t rlo =
1502
+ __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
1503
+ const __nv_fp8x2_storage_t rhi =
1504
+ __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
1505
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1506
+ }
1507
+ /**
1508
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1509
+ * Constructor from \p double4 vector data type,
1510
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1511
+ */
1512
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const double4 f) {
1513
+ const double2 flo = {f.x, f.y};
1514
+ const double2 fhi = {f.z, f.w};
1515
+ const __nv_fp8x2_storage_t rlo =
1516
+ __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
1517
+ const __nv_fp8x2_storage_t rhi =
1518
+ __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
1519
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1520
+ }
1521
+
1522
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1523
+ /* Widening converts */
1524
+
1525
+ /**
1526
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1527
+ * Conversion operator to \p float4 vector data type.
1528
+ */
1529
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
1530
+ const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
1531
+ const __nv_fp8x2_storage_t shi =
1532
+ static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
1533
+ float2 rlo = __internal_halfraw2_to_float2(
1534
+ __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E4M3));
1535
+ float2 rhi = __internal_halfraw2_to_float2(
1536
+ __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E4M3));
1537
+ float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
1538
+ return res;
1539
+ }
1540
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1541
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1542
+ };
1543
+
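And a corresponding sketch for the four-wide type (illustrative, same assumptions as above):

#include <cuda_fp8.h>

// Hypothetical demo kernel: packs a float4 into __nv_fp8x4_e4m3 and widens it back.
__global__ void fp8x4_e4m3_roundtrip_demo(float4 *out)
{
    const float4 in = {1.0f, 0.5f, -2.0f, 448.0f}; // 448 is the largest finite e4m3 value
    const __nv_fp8x4_e4m3 packed(in);              // four saturating e4m3 conversions
    *out = static_cast<float4>(packed);            // widened back to float4
}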
1544
+ #endif /* defined(__cplusplus) */
1545
+
1546
+ #endif /* end of include guard: __CUDA_FP8_HPP__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h ADDED
@@ -0,0 +1,514 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_GL_INTEROP_H__)
51
+ #define __CUDA_GL_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+
55
+ #if defined(__APPLE__)
56
+
57
+ #include <OpenGL/gl.h>
58
+
59
+ #else /* __APPLE__ */
60
+
61
+ #if defined(__arm__) || defined(__aarch64__)
62
+ #ifndef GL_VERSION
63
+ #error Please include the appropriate gl headers before including cuda_gl_interop.h
64
+ #endif
65
+ #else
66
+ #include <GL/gl.h>
67
+ #endif
68
+
69
+ #endif /* __APPLE__ */
70
+
71
+ /** \cond impl_private */
72
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
73
+ #define __CUDA_DEPRECATED
74
+ #elif defined(_MSC_VER)
75
+ #define __CUDA_DEPRECATED __declspec(deprecated)
76
+ #elif defined(__GNUC__)
77
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
78
+ #else
79
+ #define __CUDA_DEPRECATED
80
+ #endif
81
+ /** \endcond impl_private */
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif /* __cplusplus */
86
+
87
+ /**
88
+ * \addtogroup CUDART_OPENGL OpenGL Interoperability
89
+ * This section describes the OpenGL interoperability functions of the CUDA
90
+ * runtime application programming interface. Note that mapping of OpenGL
91
+ * resources is performed with the graphics-API-agnostic resource mapping
92
+ * interface described in \ref CUDART_INTEROP "Graphics Interoperability".
93
+ *
94
+ * @{
95
+ */
96
+
97
+ /**
98
+ * CUDA devices corresponding to the current OpenGL context
99
+ */
100
+ enum cudaGLDeviceList
101
+ {
102
+ cudaGLDeviceListAll = 1, /**< The CUDA devices for all GPUs used by the current OpenGL context */
103
+ cudaGLDeviceListCurrentFrame = 2, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
104
+ cudaGLDeviceListNextFrame = 3 /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
105
+ };
106
+
107
+ /**
108
+ * \brief Gets the CUDA devices associated with the current OpenGL context
109
+ *
110
+ * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
111
+ * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
112
+ * at most \p cudaDeviceCount of the CUDA-compatible devices corresponding to
113
+ * the current OpenGL context. If any of the GPUs being used by the current OpenGL
114
+ * context are not CUDA capable then the call will return ::cudaErrorNoDevice.
115
+ *
116
+ * \param pCudaDeviceCount - Returned number of CUDA devices corresponding to the
117
+ * current OpenGL context
118
+ * \param pCudaDevices - Returned CUDA devices corresponding to the current
119
+ * OpenGL context
120
+ * \param cudaDeviceCount - The size of the output device array \p pCudaDevices
121
+ * \param deviceList - The set of devices to return. This set may be
122
+ * ::cudaGLDeviceListAll for all devices,
123
+ * ::cudaGLDeviceListCurrentFrame for the devices used to
124
+ * render the current frame (in SLI), or
125
+ * ::cudaGLDeviceListNextFrame for the devices used to
126
+ * render the next frame (in SLI).
127
+ *
128
+ * \return
129
+ * ::cudaSuccess,
130
+ * ::cudaErrorNoDevice,
131
+ * ::cudaErrorInvalidGraphicsContext,
132
+ * ::cudaErrorOperatingSystem,
133
+ * ::cudaErrorUnknown
134
+ *
135
+ * \note This function is not supported on Mac OS X.
136
+ * \notefnerr
137
+ *
138
+ * \sa
139
+ * ::cudaGraphicsUnregisterResource,
140
+ * ::cudaGraphicsMapResources,
141
+ * ::cudaGraphicsSubResourceGetMappedArray,
142
+ * ::cudaGraphicsResourceGetMappedPointer,
143
+ * ::cuGLGetDevices
144
+ */
145
+ extern __host__ cudaError_t CUDARTAPI cudaGLGetDevices(unsigned int *pCudaDeviceCount, int *pCudaDevices, unsigned int cudaDeviceCount, enum cudaGLDeviceList deviceList);
146
+
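A hedged host-side sketch of the call above; it assumes an OpenGL context has already been made current on the calling thread (context creation is omitted), and the helper name is illustrative:

#include <cuda_gl_interop.h>
#include <cstdio>

// Hypothetical helper: prints the CUDA devices backing the current GL context.
static void listGLDevices(void)
{
    int devices[8];
    unsigned int count = 0;
    cudaError_t err = cudaGLGetDevices(&count, devices, 8, cudaGLDeviceListAll);
    if (err != cudaSuccess) {
        printf("cudaGLGetDevices failed: %s\n", cudaGetErrorString(err));
        return;
    }
    for (unsigned int i = 0; i < count && i < 8; ++i) {
        printf("CUDA device %d is used by the current OpenGL context\n", devices[i]);
    }
}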
147
+ /**
148
+ * \brief Register an OpenGL texture or renderbuffer object
149
+ *
150
+ * Registers the texture or renderbuffer object specified by \p image for access by CUDA.
151
+ * A handle to the registered object is returned as \p resource.
152
+ *
153
+ * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
154
+ * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
155
+ * or ::GL_RENDERBUFFER.
156
+ *
157
+ * The register flags \p flags specify the intended usage, as follows:
158
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
159
+ * resource will be used. It is therefore assumed that this resource will be
160
+ * read from and written to by CUDA. This is the default value.
161
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
162
+ * will not write to this resource.
163
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
164
+ * CUDA will not read from this resource and will write over the
165
+ * entire contents of the resource, so none of the data previously
166
+ * stored in the resource will be preserved.
167
+ * - ::cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will
168
+ * bind this resource to a surface reference.
169
+ * - ::cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform
170
+ * texture gather operations on this resource.
171
+ *
172
+ * The following image formats are supported. For brevity's sake, the list is abbreviated.
173
+ * For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
174
+ * {GL_R8, GL_R16, GL_RG8, GL_RG16} :
175
+ * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
176
+ * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
177
+ * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
178
+ * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
179
+ *
180
+ * The following image classes are currently disallowed:
181
+ * - Textures with borders
182
+ * - Multisampled renderbuffers
183
+ *
184
+ * \param resource - Pointer to the returned object handle
185
+ * \param image - name of texture or renderbuffer object to be registered
186
+ * \param target - Identifies the type of object specified by \p image
187
+ * \param flags - Register flags
188
+ *
189
+ * \return
190
+ * ::cudaSuccess,
191
+ * ::cudaErrorInvalidDevice,
192
+ * ::cudaErrorInvalidValue,
193
+ * ::cudaErrorInvalidResourceHandle,
194
+ * ::cudaErrorOperatingSystem,
195
+ * ::cudaErrorUnknown
196
+ * \notefnerr
197
+ *
198
+ * \sa
199
+ * ::cudaGraphicsUnregisterResource,
200
+ * ::cudaGraphicsMapResources,
201
+ * ::cudaGraphicsSubResourceGetMappedArray,
202
+ * ::cuGraphicsGLRegisterImage
203
+ */
204
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterImage(struct cudaGraphicsResource **resource, GLuint image, GLenum target, unsigned int flags);
205
+
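For illustration, a sketch of the typical register / map / access / unmap sequence for a texture; the texture id and helper name are assumptions, and the owning GL context must be current:

#include <cuda_gl_interop.h>

// Hypothetical helper: registers a GL_TEXTURE_2D and accesses its level-0 array.
static cudaError_t accessTextureOnce(GLuint tex)
{
    cudaGraphicsResource_t res = NULL;
    cudaArray_t levelArray = NULL;
    cudaError_t err = cudaGraphicsGLRegisterImage(&res, tex, GL_TEXTURE_2D,
                                                  cudaGraphicsRegisterFlagsReadOnly);
    if (err != cudaSuccess) return err;
    if ((err = cudaGraphicsMapResources(1, &res, 0)) == cudaSuccess) {
        // Array index 0, mip level 0 of the (non-layered) texture.
        err = cudaGraphicsSubResourceGetMappedArray(&levelArray, res, 0, 0);
        // ... while mapped: wrap levelArray in a texture/surface object and launch kernels ...
        cudaGraphicsUnmapResources(1, &res, 0);
    }
    cudaGraphicsUnregisterResource(res);
    return err;
}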
206
+ /**
207
+ * \brief Registers an OpenGL buffer object
208
+ *
209
+ * Registers the buffer object specified by \p buffer for access by
210
+ * CUDA. A handle to the registered object is returned as \p
211
+ * resource. The register flags \p flags specify the intended usage,
212
+ * as follows:
213
+ *
214
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
215
+ * resource will be used. It is therefore assumed that this resource will be
216
+ * read from and written to by CUDA. This is the default value.
217
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
218
+ * will not write to this resource.
219
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
220
+ * CUDA will not read from this resource and will write over the
221
+ * entire contents of the resource, so none of the data previously
222
+ * stored in the resource will be preserved.
223
+ *
224
+ * \param resource - Pointer to the returned object handle
225
+ * \param buffer - name of buffer object to be registered
226
+ * \param flags - Register flags
227
+ *
228
+ * \return
229
+ * ::cudaSuccess,
230
+ * ::cudaErrorInvalidDevice,
231
+ * ::cudaErrorInvalidValue,
232
+ * ::cudaErrorInvalidResourceHandle,
233
+ * ::cudaErrorOperatingSystem,
234
+ * ::cudaErrorUnknown
235
+ * \notefnerr
236
+ *
237
+ * \sa
238
+ * ::cudaGraphicsUnregisterResource,
239
+ * ::cudaGraphicsMapResources,
240
+ * ::cudaGraphicsResourceGetMappedPointer,
241
+ * ::cuGraphicsGLRegisterBuffer
242
+ */
243
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterBuffer(struct cudaGraphicsResource **resource, GLuint buffer, unsigned int flags);
244
+
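Similarly, a sketch for a buffer object using the non-deprecated mapping path of the Graphics Interoperability API; the buffer id and helper name are assumptions:

#include <cuda_gl_interop.h>

// Hypothetical helper: lets CUDA overwrite the full contents of a GL buffer object.
static cudaError_t writeBufferFromCuda(GLuint vbo)
{
    cudaGraphicsResource_t res = NULL;
    cudaError_t err = cudaGraphicsGLRegisterBuffer(&res, vbo,
                                                   cudaGraphicsRegisterFlagsWriteDiscard);
    if (err != cudaSuccess) return err;
    if ((err = cudaGraphicsMapResources(1, &res, 0)) == cudaSuccess) {
        void  *devPtr   = NULL;
        size_t numBytes = 0;
        err = cudaGraphicsResourceGetMappedPointer(&devPtr, &numBytes, res);
        // ... while mapped: fill devPtr (numBytes bytes) from a kernel or cudaMemcpy ...
        cudaGraphicsUnmapResources(1, &res, 0);
    }
    cudaGraphicsUnregisterResource(res);
    return err;
}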
245
+ #ifdef _WIN32
246
+ #ifndef WGL_NV_gpu_affinity
247
+ typedef void* HGPUNV;
248
+ #endif
249
+
250
+ /**
251
+ * \brief Gets the CUDA device associated with hGpu
252
+ *
253
+ * Returns the CUDA device associated with a hGpu, if applicable.
254
+ *
255
+ * \param device - Returns the device associated with hGpu, or -1 if hGpu is
256
+ * not a compute device.
257
+ * \param hGpu - Handle to a GPU, as queried via WGL_NV_gpu_affinity
258
+ *
259
+ * \return
260
+ * ::cudaSuccess
261
+ * \notefnerr
262
+ *
263
+ * \sa
264
+ * ::WGL_NV_gpu_affinity,
265
+ * ::cuWGLGetDevice
266
+ */
267
+ extern __host__ cudaError_t CUDARTAPI cudaWGLGetDevice(int *device, HGPUNV hGpu);
268
+ #endif
269
+
270
+ /** @} */ /* END CUDART_OPENGL */
271
+
272
+ /**
273
+ * \addtogroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED]
274
+ * This section describes deprecated OpenGL interoperability functionality.
275
+ *
276
+ * @{
277
+ */
278
+
279
+ /**
280
+ * CUDA GL Map Flags
281
+ */
282
+ enum cudaGLMapFlags
283
+ {
284
+ cudaGLMapFlagsNone = 0, /**< Default; Assume resource can be read/written */
285
+ cudaGLMapFlagsReadOnly = 1, /**< CUDA kernels will not write to this resource */
286
+ cudaGLMapFlagsWriteDiscard = 2 /**< CUDA kernels will only write to and will not read from this resource */
287
+ };
288
+
289
+ /**
290
+ * \brief Sets a CUDA device to use OpenGL interoperability
291
+ *
292
+ * \deprecated This function is deprecated as of CUDA 5.0.
293
+ *
294
+ * This function is deprecated and should no longer be used. It is
295
+ * no longer necessary to associate a CUDA device with an OpenGL
296
+ * context in order to achieve maximum interoperability performance.
297
+ *
298
+ * This function will immediately initialize the primary context on
299
+ * \p device if needed.
300
+ *
301
+ * \param device - Device to use for OpenGL interoperability
302
+ *
303
+ * \return
304
+ * ::cudaSuccess,
305
+ * ::cudaErrorInvalidDevice,
306
+ * ::cudaErrorSetOnActiveProcess
307
+ * \notefnerr
308
+ *
309
+ * \sa ::cudaGraphicsGLRegisterBuffer, ::cudaGraphicsGLRegisterImage
310
+ */
311
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetGLDevice(int device);
312
+
313
+ /**
314
+ * \brief Registers a buffer object for access by CUDA
315
+ *
316
+ * \deprecated This function is deprecated as of CUDA 3.0.
317
+ *
318
+ * Registers the buffer object of ID \p bufObj for access by
319
+ * CUDA. This function must be called before CUDA can map the buffer
320
+ * object. The OpenGL context used to create the buffer, or another
321
+ * context from the same share group, must be bound to the current
322
+ * thread when this is called.
323
+ *
324
+ * \param bufObj - Buffer object ID to register
325
+ *
326
+ * \return
327
+ * ::cudaSuccess,
328
+ * ::cudaErrorInitializationError
329
+ * \notefnerr
330
+ *
331
+ * \sa ::cudaGraphicsGLRegisterBuffer
332
+ */
333
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLRegisterBufferObject(GLuint bufObj);
334
+
335
+ /**
336
+ * \brief Maps a buffer object for access by CUDA
337
+ *
338
+ * \deprecated This function is deprecated as of CUDA 3.0.
339
+ *
340
+ * Maps the buffer object of ID \p bufObj into the address space of
341
+ * CUDA and returns in \p *devPtr the base pointer of the resulting
342
+ * mapping. The buffer must have previously been registered by
343
+ * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
344
+ * by CUDA, any OpenGL operation which references the buffer will
345
+ * result in undefined behavior. The OpenGL context used to create
346
+ * the buffer, or another context from the same share group, must be
347
+ * bound to the current thread when this is called.
348
+ *
349
+ * All streams in the current thread are synchronized with the current
350
+ * GL context.
351
+ *
352
+ * \param devPtr - Returned device pointer to CUDA object
353
+ * \param bufObj - Buffer object ID to map
354
+ *
355
+ * \return
356
+ * ::cudaSuccess,
357
+ * ::cudaErrorMapBufferObjectFailed
358
+ * \notefnerr
359
+ *
360
+ * \sa ::cudaGraphicsMapResources
361
+ */
362
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObject(void **devPtr, GLuint bufObj);
363
+
364
+ /**
365
+ * \brief Unmaps a buffer object for access by CUDA
366
+ *
367
+ * \deprecated This function is deprecated as of CUDA 3.0.
368
+ *
369
+ * Unmaps the buffer object of ID \p bufObj for access by CUDA. When
370
+ * a buffer is unmapped, the base address returned by
371
+ * ::cudaGLMapBufferObject() is invalid and subsequent references to
372
+ * the address result in undefined behavior. The OpenGL context used
373
+ * to create the buffer, or another context from the same share group,
374
+ * must be bound to the current thread when this is called.
375
+ *
376
+ * All streams in the current thread are synchronized with the current
377
+ * GL context.
378
+ *
379
+ * \param bufObj - Buffer object to unmap
380
+ *
381
+ * \return
382
+ * ::cudaSuccess,
383
+ * ::cudaErrorUnmapBufferObjectFailed
384
+ * \notefnerr
385
+ *
386
+ * \sa ::cudaGraphicsUnmapResources
387
+ */
388
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObject(GLuint bufObj);
389
+
390
+ /**
391
+ * \brief Unregisters a buffer object for access by CUDA
392
+ *
393
+ * \deprecated This function is deprecated as of CUDA 3.0.
394
+ *
395
+ * Unregisters the buffer object of ID \p bufObj for access by CUDA
396
+ * and releases any CUDA resources associated with the buffer. Once a
397
+ * buffer is unregistered, it may no longer be mapped by CUDA. The GL
398
+ * context used to create the buffer, or another context from the
399
+ * same share group, must be bound to the current thread when this is
400
+ * called.
401
+ *
402
+ * \param bufObj - Buffer object to unregister
403
+ *
404
+ * \return
405
+ * ::cudaSuccess
406
+ * \notefnerr
407
+ *
408
+ * \sa ::cudaGraphicsUnregisterResource
409
+ */
410
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnregisterBufferObject(GLuint bufObj);
411
+
412
+ /**
413
+ * \brief Set usage flags for mapping an OpenGL buffer
414
+ *
415
+ * \deprecated This function is deprecated as of CUDA 3.0.
416
+ *
417
+ * Set flags for mapping the OpenGL buffer \p bufObj
418
+ *
419
+ * Changes to flags will take effect the next time \p bufObj is mapped.
420
+ * The \p flags argument may be any of the following:
421
+ *
422
+ * - ::cudaGLMapFlagsNone: Specifies no hints about how this buffer will
423
+ * be used. It is therefore assumed that this buffer will be read from and
424
+ * written to by CUDA kernels. This is the default value.
425
+ * - ::cudaGLMapFlagsReadOnly: Specifies that CUDA kernels which access this
426
+ * buffer will not write to the buffer.
427
+ * - ::cudaGLMapFlagsWriteDiscard: Specifies that CUDA kernels which access
428
+ * this buffer will not read from the buffer and will write over the
429
+ * entire contents of the buffer, so none of the data previously stored in
430
+ * the buffer will be preserved.
431
+ *
432
+ * If \p bufObj has not been registered for use with CUDA, then
433
+ * ::cudaErrorInvalidResourceHandle is returned. If \p bufObj is presently
434
+ * mapped for access by CUDA, then ::cudaErrorUnknown is returned.
435
+ *
436
+ * \param bufObj - Registered buffer object to set flags for
437
+ * \param flags - Parameters for buffer mapping
438
+ *
439
+ * \return
440
+ * ::cudaSuccess,
441
+ * ::cudaErrorInvalidValue,
442
+ * ::cudaErrorInvalidResourceHandle,
443
+ * ::cudaErrorUnknown
444
+ * \notefnerr
445
+ *
446
+ * \sa ::cudaGraphicsResourceSetMapFlags
447
+ */
448
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetBufferObjectMapFlags(GLuint bufObj, unsigned int flags);
449
+
450
+ /**
451
+ * \brief Maps a buffer object for access by CUDA
452
+ *
453
+ * \deprecated This function is deprecated as of CUDA 3.0.
454
+ *
455
+ * Maps the buffer object of ID \p bufObj into the address space of
456
+ * CUDA and returns in \p *devPtr the base pointer of the resulting
457
+ * mapping. The buffer must have previously been registered by
458
+ * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
459
+ * by CUDA, any OpenGL operation which references the buffer will
460
+ * result in undefined behavior. The OpenGL context used to create
461
+ * the buffer, or another context from the same share group, must be
462
+ * bound to the current thread when this is called.
463
+ *
464
+ * Stream \p stream is synchronized with the current GL context.
465
+ *
466
+ * \param devPtr - Returned device pointer to CUDA object
467
+ * \param bufObj - Buffer object ID to map
468
+ * \param stream - Stream to synchronize
469
+ *
470
+ * \return
471
+ * ::cudaSuccess,
472
+ * ::cudaErrorMapBufferObjectFailed
473
+ * \notefnerr
474
+ *
475
+ * \sa ::cudaGraphicsMapResources
476
+ */
477
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObjectAsync(void **devPtr, GLuint bufObj, cudaStream_t stream);
478
+
479
+ /**
480
+ * \brief Unmaps a buffer object for access by CUDA
481
+ *
482
+ * \deprecated This function is deprecated as of CUDA 3.0.
483
+ *
484
+ * Unmaps the buffer object of ID \p bufObj for access by CUDA. When
485
+ * a buffer is unmapped, the base address returned by
486
+ * ::cudaGLMapBufferObject() is invalid and subsequent references to
487
+ * the address result in undefined behavior. The OpenGL context used
488
+ * to create the buffer, or another context from the same share group,
489
+ * must be bound to the current thread when this is called.
490
+ *
491
+ * Stream \p stream is synchronized with the current GL context.
492
+ *
493
+ * \param bufObj - Buffer object to unmap
494
+ * \param stream - Stream to synchronize
495
+ *
496
+ * \return
497
+ * ::cudaSuccess,
498
+ * ::cudaErrorUnmapBufferObjectFailed
499
+ * \notefnerr
500
+ *
501
+ * \sa ::cudaGraphicsUnmapResources
502
+ */
503
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObjectAsync(GLuint bufObj, cudaStream_t stream);
504
+
505
+ /** @} */ /* END CUDART_OPENGL_DEPRECATED */
506
+
507
+ #if defined(__cplusplus)
508
+ }
509
+ #endif /* __cplusplus */
510
+
511
+ #undef __CUDA_DEPRECATED
512
+
513
+ #endif /* __CUDA_GL_INTEROP_H__ */
514
+
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h ADDED
@@ -0,0 +1,1958 @@
1
+ /*
2
+ * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * CUDA Occupancy Calculator
52
+ *
53
+ * NAME
54
+ *
55
+ * cudaOccMaxActiveBlocksPerMultiprocessor,
56
+ * cudaOccMaxPotentialOccupancyBlockSize,
57
+ * cudaOccMaxPotentialOccupancyBlockSizeVariableSMem
58
+ * cudaOccAvailableDynamicSMemPerBlock
59
+ *
60
+ * DESCRIPTION
61
+ *
62
+ * The CUDA occupancy calculator provides a standalone, programmatic
63
+ * interface to compute the occupancy of a function on a device. It can also
64
+ * provide occupancy-oriented launch configuration suggestions.
65
+ *
66
+ * The function and device are defined by the user through
67
+ * cudaOccFuncAttributes, cudaOccDeviceProp, and cudaOccDeviceState
68
+ * structures. All APIs require all 3 of them.
69
+ *
70
+ * See the structure definition for more details about the device / function
71
+ * descriptors.
72
+ *
73
+ * See each API's prototype for API usage.
74
+ *
75
+ * COMPATIBILITY
76
+ *
77
+ * The occupancy calculator will be updated on each major CUDA toolkit
78
+ * release. It does not provide forward compatibility, i.e. new hardware
79
+ * released after this implementation's release will not be supported.
80
+ *
81
+ * NOTE
82
+ *
83
+ * If there is access to CUDA runtime, and the sole intent is to calculate
84
+ * occupancy related values on one of the accessible CUDA devices, using CUDA
85
+ * runtime's occupancy calculation APIs is recommended.
86
+ *
87
+ */
88
+
89
+ #ifndef __cuda_occupancy_h__
90
+ #define __cuda_occupancy_h__
91
+
92
+ #include <stddef.h>
93
+ #include <limits.h>
94
+ #include <string.h>
95
+
96
+
97
+ // __OCC_INLINE will be undefined at the end of this header
98
+ //
99
+ #ifdef __CUDACC__
100
+ #define __OCC_INLINE inline __host__ __device__
101
+ #elif defined _MSC_VER
102
+ #define __OCC_INLINE __inline
103
+ #else // GNUCC assumed
104
+ #define __OCC_INLINE inline
105
+ #endif
106
+
107
+ enum cudaOccError_enum {
108
+ CUDA_OCC_SUCCESS = 0, // no error encountered
109
+ CUDA_OCC_ERROR_INVALID_INPUT = 1, // input parameter is invalid
110
+ CUDA_OCC_ERROR_UNKNOWN_DEVICE = 2, // requested device is not supported in
111
+ // current implementation or device is
112
+ // invalid
113
+ };
114
+ typedef enum cudaOccError_enum cudaOccError;
115
+
116
+ typedef struct cudaOccResult cudaOccResult;
117
+ typedef struct cudaOccDeviceProp cudaOccDeviceProp;
118
+ typedef struct cudaOccFuncAttributes cudaOccFuncAttributes;
119
+ typedef struct cudaOccDeviceState cudaOccDeviceState;
120
+
121
+ /**
122
+ * The CUDA occupancy calculator computes the occupancy of the function
123
+ * described by attributes with the given block size (blockSize), static device
124
+ * properties (properties), dynamic device states (states) and per-block dynamic
125
+ * shared memory allocation (dynamicSMemSize) in bytes, and output it through
126
+ * result along with other useful information. The occupancy is computed in
127
+ * terms of the maximum number of active blocks per multiprocessor. The user can
128
+ * then convert it to other metrics, such as number of active warps.
129
+ *
130
+ * RETURN VALUE
131
+ *
132
+ * The occupancy and related information is returned through result.
133
+ *
134
+ * If result->activeBlocksPerMultiprocessor is 0, then the given parameter
135
+ * combination cannot run on the device.
136
+ *
137
+ * ERRORS
138
+ *
139
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
140
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
141
+ * current implementation or device is invalid
142
+ */
143
+ static __OCC_INLINE
144
+ cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
145
+ cudaOccResult *result, // out
146
+ const cudaOccDeviceProp *properties, // in
147
+ const cudaOccFuncAttributes *attributes, // in
148
+ const cudaOccDeviceState *state, // in
149
+ int blockSize, // in
150
+ size_t dynamicSmemSize); // in
151
+
152
+ /**
153
+ * The CUDA launch configurator C API suggests a grid / block size pair (in
154
+ * minGridSize and blockSize) that achieves the best potential occupancy
155
+ * (i.e. maximum number of active warps with the smallest number of blocks) for
156
+ * the given function described by attributes, on a device described by
157
+ * properties with settings in state.
158
+ *
159
+ * If per-block dynamic shared memory allocation is not needed, the user should
160
+ * leave both blockSizeToDynamicSMemSize and dynamicSMemSize as 0.
161
+ *
162
+ * If per-block dynamic shared memory allocation is needed, then if the dynamic
163
+ * shared memory size is constant regardless of block size, the size should be
164
+ * passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be
165
+ * NULL.
166
+ *
167
+ * Otherwise, if the per-block dynamic shared memory size varies with different
168
+ * block sizes, the user needs to provide a pointer to a unary function through
169
+ * blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by
170
+ * a block of the function for any given block size. dynamicSMemSize is
171
+ * ignored. An example signature is:
172
+ *
173
+ * // Take block size, returns dynamic shared memory needed
174
+ * size_t blockToSmem(int blockSize);
175
+ *
176
+ * RETURN VALUE
177
+ *
178
+ * The suggested block size and the minimum number of blocks needed to achieve
179
+ * the maximum occupancy are returned through blockSize and minGridSize.
180
+ *
181
+ * If *blockSize is 0, then the given combination cannot run on the device.
182
+ *
183
+ * ERRORS
184
+ *
185
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
186
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
187
+ * current implementation or device is invalid
188
+ *
189
+ */
190
+ static __OCC_INLINE
191
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
192
+ int *minGridSize, // out
193
+ int *blockSize, // out
194
+ const cudaOccDeviceProp *properties, // in
195
+ const cudaOccFuncAttributes *attributes, // in
196
+ const cudaOccDeviceState *state, // in
197
+ size_t (*blockSizeToDynamicSMemSize)(int), // in
198
+ size_t dynamicSMemSize); // in
199
+
200
+ /**
201
+ * The CUDA launch configurator C++ API suggests a grid / block size pair (in
202
+ * minGridSize and blockSize) that achieves the best potential occupancy
203
+ * (i.e. the maximum number of active warps with the smallest number of blocks)
204
+ * for the given function described by attributes, on a device described by
205
+ * properties with settings in state.
206
+ *
207
+ * If per-block dynamic shared memory allocation is 0 or constant regardless of
208
+ * block size, the user can use cudaOccMaxPotentialOccupancyBlockSize to
209
+ * configure the launch. A constant dynamic shared memory allocation size in
210
+ * bytes can be passed through dynamicSMemSize.
211
+ *
212
+ * Otherwise, if the per-block dynamic shared memory size varies with different
213
+ * block sizes, the user needs to use
214
+ * cudaOccMaxPotentialOccupancyBlockSizeVariableSmem instead, and provide a
215
+ * functor / pointer to a unary function (blockSizeToDynamicSMemSize) that
216
+ * computes the dynamic shared memory needed by func for any given block
217
+ * size. An example signature is:
218
+ *
219
+ * // Take block size, returns per-block dynamic shared memory needed
220
+ * size_t blockToSmem(int blockSize);
221
+ *
222
+ * RETURN VALUE
223
+ *
224
+ * The suggested block size and the minimum number of blocks needed to achieve
225
+ * the maximum occupancy are returned through blockSize and minGridSize.
226
+ *
227
+ * If *blockSize is 0, then the given combination cannot run on the device.
228
+ *
229
+ * ERRORS
230
+ *
231
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
232
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
233
+ * current implementation or device is invalid
234
+ *
235
+ */
236
+
237
+ #if defined(__cplusplus)
238
+ namespace {
239
+
240
+ __OCC_INLINE
241
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
242
+ int *minGridSize, // out
243
+ int *blockSize, // out
244
+ const cudaOccDeviceProp *properties, // in
245
+ const cudaOccFuncAttributes *attributes, // in
246
+ const cudaOccDeviceState *state, // in
247
+ size_t dynamicSMemSize = 0); // in
248
+
249
+ template <typename UnaryFunction>
250
+ __OCC_INLINE
251
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
252
+ int *minGridSize, // out
253
+ int *blockSize, // out
254
+ const cudaOccDeviceProp *properties, // in
255
+ const cudaOccFuncAttributes *attributes, // in
256
+ const cudaOccDeviceState *state, // in
257
+ UnaryFunction blockSizeToDynamicSMemSize); // in
258
+
259
+ } // namespace anonymous
260
+ #endif // defined(__cplusplus)
261
+
262
+ /**
263
+ *
264
+ * The CUDA dynamic shared memory calculator computes the maximum size of
265
+ * per-block dynamic shared memory if we want to place numBlocks blocks
266
+ * on an SM.
267
+ *
268
+ * RETURN VALUE
269
+ *
270
+ * Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow
271
+ * numBlocks blocks per SM.
272
+ *
273
+ * ERRORS
274
+ *
275
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
276
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
277
+ * current implementation or device is invalid
278
+ *
279
+ */
280
+ static __OCC_INLINE
281
+ cudaOccError cudaOccAvailableDynamicSMemPerBlock(
282
+ size_t *dynamicSmemSize,
283
+ const cudaOccDeviceProp *properties,
284
+ const cudaOccFuncAttributes *attributes,
285
+ const cudaOccDeviceState *state,
286
+ int numBlocks,
287
+ int blockSize);
288
+
289
+ /**
290
+ * Data structures
291
+ *
292
+ * These structures are subject to change for future architecture and CUDA
293
+ * releases. C users should initialize the structure as {0}.
294
+ *
295
+ */
296
+
297
+ /**
298
+ * Device descriptor
299
+ *
300
+ * This structure describes a device.
301
+ */
302
+ struct cudaOccDeviceProp {
303
+ int computeMajor; // Compute capability major version
304
+ int computeMinor; // Compute capability minor
305
+ // version. An unsupported minor
306
+ // version may cause an error
307
+ int maxThreadsPerBlock; // Maximum number of threads per block
308
+ int maxThreadsPerMultiprocessor; // Maximum number of threads per SM
309
+ // i.e. (Max. number of warps) x (warp
310
+ // size)
311
+ int regsPerBlock; // Maximum number of registers per block
312
+ int regsPerMultiprocessor; // Maximum number of registers per SM
313
+ int warpSize; // Warp size
314
+ size_t sharedMemPerBlock; // Maximum shared memory size per block
315
+ size_t sharedMemPerMultiprocessor; // Maximum shared memory size per SM
316
+ int numSms; // Number of SMs available
317
+ size_t sharedMemPerBlockOptin; // Maximum optin shared memory size per block
318
+ size_t reservedSharedMemPerBlock; // Shared memory per block reserved by driver
319
+
320
+ #ifdef __cplusplus
321
+ // This structure can be converted from a cudaDeviceProp structure for users
322
+ // that use this header in their CUDA applications.
323
+ //
324
+ // If the application has access to the CUDA Runtime API, the application
325
+ // can obtain the device properties of a CUDA device through
326
+ // cudaGetDeviceProperties, and initialize a cudaOccDeviceProp with the
327
+ // cudaDeviceProp structure.
328
+ //
329
+ // Example:
330
+ /*
331
+ {
332
+ cudaDeviceProp prop;
333
+
334
+ cudaGetDeviceProperties(&prop, ...);
335
+
336
+ cudaOccDeviceProp occProp = prop;
337
+
338
+ ...
339
+
340
+ cudaOccMaxPotentialOccupancyBlockSize(..., &occProp, ...);
341
+ }
342
+ */
343
+ //
344
+ template<typename DeviceProp>
345
+ __OCC_INLINE
346
+ cudaOccDeviceProp(const DeviceProp &props)
347
+ : computeMajor (props.major),
348
+ computeMinor (props.minor),
349
+ maxThreadsPerBlock (props.maxThreadsPerBlock),
350
+ maxThreadsPerMultiprocessor (props.maxThreadsPerMultiProcessor),
351
+ regsPerBlock (props.regsPerBlock),
352
+ regsPerMultiprocessor (props.regsPerMultiprocessor),
353
+ warpSize (props.warpSize),
354
+ sharedMemPerBlock (props.sharedMemPerBlock),
355
+ sharedMemPerMultiprocessor (props.sharedMemPerMultiprocessor),
356
+ numSms (props.multiProcessorCount),
357
+ sharedMemPerBlockOptin (props.sharedMemPerBlockOptin),
358
+ reservedSharedMemPerBlock (props.reservedSharedMemPerBlock)
359
+ {}
360
+
361
+ __OCC_INLINE
362
+ cudaOccDeviceProp()
363
+ : computeMajor (0),
364
+ computeMinor (0),
365
+ maxThreadsPerBlock (0),
366
+ maxThreadsPerMultiprocessor (0),
367
+ regsPerBlock (0),
368
+ regsPerMultiprocessor (0),
369
+ warpSize (0),
370
+ sharedMemPerBlock (0),
371
+ sharedMemPerMultiprocessor (0),
372
+ numSms (0),
373
+ sharedMemPerBlockOptin (0),
374
+ reservedSharedMemPerBlock (0)
375
+ {}
376
+ #endif // __cplusplus
377
+ };
378
+
379
+ /**
380
+ * Partitioned global caching option
381
+ */
382
+ typedef enum cudaOccPartitionedGCConfig_enum {
383
+ PARTITIONED_GC_OFF, // Disable partitioned global caching
384
+ PARTITIONED_GC_ON, // Prefer partitioned global caching
385
+ PARTITIONED_GC_ON_STRICT // Force partitioned global caching
386
+ } cudaOccPartitionedGCConfig;
387
+
388
+ /**
389
+ * Per function opt in maximum dynamic shared memory limit
390
+ */
391
+ typedef enum cudaOccFuncShmemConfig_enum {
392
+ FUNC_SHMEM_LIMIT_DEFAULT, // Default shmem limit
393
+ FUNC_SHMEM_LIMIT_OPTIN, // Use the optin shmem limit
394
+ } cudaOccFuncShmemConfig;
395
+
396
+ /**
397
+ * Function descriptor
398
+ *
399
+ * This structure describes a CUDA function.
400
+ */
401
+ struct cudaOccFuncAttributes {
402
+ int maxThreadsPerBlock; // Maximum block size the function can work with. If
403
+ // unlimited, use INT_MAX or any value greater than
404
+ // or equal to maxThreadsPerBlock of the device
405
+ int numRegs; // Number of registers used. When the function is
406
+ // launched on the device, the register count may
407
+ // change due to internal tool requirements.
408
+ size_t sharedSizeBytes; // Amount of static shared memory used
409
+
410
+ cudaOccPartitionedGCConfig partitionedGCConfig;
411
+ // Partitioned global caching is required to enable
412
+ // caching on certain chips, such as sm_52
413
+ // devices. Partitioned global caching can be
414
+ // automatically disabled if the occupancy
415
+ // requirement of the launch cannot support caching.
416
+ //
417
+ // To override this behavior with caching on and
418
+ // calculate occupancy strictly according to the
419
+ // preference, set partitionedGCConfig to
420
+ // PARTITIONED_GC_ON_STRICT. This is especially
421
+ // useful for experimenting and finding launch
422
+ // configurations (MaxPotentialOccupancyBlockSize)
423
+ // that allow global caching to take effect.
424
+ //
425
+ // This flag only affects the occupancy calculation.
426
+
427
+ cudaOccFuncShmemConfig shmemLimitConfig;
428
+ // Certain chips like sm_70 allow a user to opt into
429
+ // a higher per-block limit of dynamic shared memory.
430
+ // This opt-in is performed on a per-function basis
431
+ // using the cuFuncSetAttribute function.
432
+
433
+ size_t maxDynamicSharedSizeBytes;
434
+ // User set limit on maximum dynamic shared memory
435
+ // usable by the kernel
436
+ // This limit is set using the cuFuncSetAttribute
437
+ // function.
438
+
439
+ int numBlockBarriers; // Number of block barriers used (default to 1)
440
+ #ifdef __cplusplus
441
+ // This structure can be converted from a cudaFuncAttributes structure for
442
+ // users that use this header in their CUDA applications.
443
+ //
444
+ // If the application has access to the CUDA Runtime API, the application
445
+ // can obtain the function attributes of a CUDA kernel function through
446
+ // cudaFuncGetAttributes, and initialize a cudaOccFuncAttributes with the
447
+ // cudaFuncAttributes structure.
448
+ //
449
+ // Example:
450
+ /*
451
+ __global__ void foo() {...}
452
+
453
+ ...
454
+
455
+ {
456
+ cudaFuncAttributes attr;
457
+
458
+ cudaFuncGetAttributes(&attr, foo);
459
+
460
+ cudaOccFuncAttributes occAttr = attr;
461
+
462
+ ...
463
+
464
+ cudaOccMaxPotentialOccupancyBlockSize(..., &occAttr, ...);
465
+ }
466
+ */
467
+ //
468
+ template<typename FuncAttributes>
469
+ __OCC_INLINE
470
+ cudaOccFuncAttributes(const FuncAttributes &attr)
471
+ : maxThreadsPerBlock (attr.maxThreadsPerBlock),
472
+ numRegs (attr.numRegs),
473
+ sharedSizeBytes (attr.sharedSizeBytes),
474
+ partitionedGCConfig (PARTITIONED_GC_OFF),
475
+ shmemLimitConfig (FUNC_SHMEM_LIMIT_OPTIN),
476
+ maxDynamicSharedSizeBytes (attr.maxDynamicSharedSizeBytes),
477
+ numBlockBarriers (1)
478
+ {}
479
+
480
+ __OCC_INLINE
481
+ cudaOccFuncAttributes()
482
+ : maxThreadsPerBlock (0),
483
+ numRegs (0),
484
+ sharedSizeBytes (0),
485
+ partitionedGCConfig (PARTITIONED_GC_OFF),
486
+ shmemLimitConfig (FUNC_SHMEM_LIMIT_DEFAULT),
487
+ maxDynamicSharedSizeBytes (0),
488
+ numBlockBarriers (0)
489
+ {}
490
+ #endif
491
+ };
492
+
493
+ typedef enum cudaOccCacheConfig_enum {
494
+ CACHE_PREFER_NONE = 0x00, // no preference for shared memory or L1 (default)
495
+ CACHE_PREFER_SHARED = 0x01, // prefer larger shared memory and smaller L1 cache
496
+ CACHE_PREFER_L1 = 0x02, // prefer larger L1 cache and smaller shared memory
497
+ CACHE_PREFER_EQUAL = 0x03 // prefer equal sized L1 cache and shared memory
498
+ } cudaOccCacheConfig;
499
+
500
+ typedef enum cudaOccCarveoutConfig_enum {
501
+ SHAREDMEM_CARVEOUT_DEFAULT = -1, // no preference for shared memory or L1 (default)
502
+ SHAREDMEM_CARVEOUT_MAX_SHARED = 100, // prefer maximum available shared memory, minimum L1 cache
503
+ SHAREDMEM_CARVEOUT_MAX_L1 = 0, // prefer maximum available L1 cache, minimum shared memory
504
+ SHAREDMEM_CARVEOUT_HALF = 50 // prefer half of maximum available shared memory, with the rest as L1 cache
505
+ } cudaOccCarveoutConfig;
506
+
507
+ /**
508
+ * Device state descriptor
509
+ *
510
+ * This structure describes device settings that affect occupancy calculation.
511
+ */
512
+ struct cudaOccDeviceState
513
+ {
514
+ // Cache / shared memory split preference. Deprecated on Volta
515
+ cudaOccCacheConfig cacheConfig;
516
+ // Shared memory / L1 split preference. Supported only on Volta
517
+ int carveoutConfig;
518
+
519
+ #ifdef __cplusplus
520
+ __OCC_INLINE
521
+ cudaOccDeviceState()
522
+ : cacheConfig (CACHE_PREFER_NONE),
523
+ carveoutConfig (SHAREDMEM_CARVEOUT_DEFAULT)
524
+ {}
525
+ #endif
526
+ };
527
+
528
+ typedef enum cudaOccLimitingFactor_enum {
529
+ // Occupancy limited due to:
530
+ OCC_LIMIT_WARPS = 0x01, // - warps available
531
+ OCC_LIMIT_REGISTERS = 0x02, // - registers available
532
+ OCC_LIMIT_SHARED_MEMORY = 0x04, // - shared memory available
533
+ OCC_LIMIT_BLOCKS = 0x08, // - blocks available
534
+ OCC_LIMIT_BARRIERS = 0x10 // - barriers available
535
+ } cudaOccLimitingFactor;
536
+
537
+ /**
538
+ * Occupancy output
539
+ *
540
+ * This structure contains occupancy calculator's output.
541
+ */
542
+ struct cudaOccResult {
543
+ int activeBlocksPerMultiprocessor; // Occupancy
544
+ unsigned int limitingFactors; // Factors that limited occupancy. A bit
545
+ // field that counts the limiting
546
+ // factors, see cudaOccLimitingFactor
547
+ int blockLimitRegs; // Occupancy due to register
548
+ // usage, INT_MAX if the kernel does not
549
+ // use any register.
550
+ int blockLimitSharedMem; // Occupancy due to shared memory
551
+ // usage, INT_MAX if the kernel does not
552
+ // use shared memory.
553
+ int blockLimitWarps; // Occupancy due to block size limit
554
+ int blockLimitBlocks; // Occupancy due to maximum number of blocks
555
+ // manageable per SM
556
+ int blockLimitBarriers; // Occupancy due to block barrier usage
557
+ int allocatedRegistersPerBlock; // Actual number of registers allocated per
558
+ // block
559
+ size_t allocatedSharedMemPerBlock; // Actual size of shared memory allocated
560
+ // per block
561
+ cudaOccPartitionedGCConfig partitionedGCConfig;
562
+ // Report if partitioned global caching
563
+ // is actually enabled.
564
+ };
565
+
566
+ /**
567
+ * Partitioned global caching support
568
+ *
569
+ * See cudaOccPartitionedGlobalCachingModeSupport
570
+ */
571
+ typedef enum cudaOccPartitionedGCSupport_enum {
572
+ PARTITIONED_GC_NOT_SUPPORTED, // Partitioned global caching is not supported
573
+ PARTITIONED_GC_SUPPORTED, // Partitioned global caching is supported
574
+ } cudaOccPartitionedGCSupport;
575
+
576
+ /**
577
+ * Implementation
578
+ */
579
+
580
+ /**
581
+ * Max compute capability supported
582
+ */
583
+ #define __CUDA_OCC_MAJOR__ 9
584
+ #define __CUDA_OCC_MINOR__ 0
585
+
586
+ //////////////////////////////////////////
587
+ // Mathematical Helper Functions //
588
+ //////////////////////////////////////////
589
+
590
+ static __OCC_INLINE int __occMin(int lhs, int rhs)
591
+ {
592
+ return rhs < lhs ? rhs : lhs;
593
+ }
594
+
595
+ static __OCC_INLINE int __occDivideRoundUp(int x, int y)
596
+ {
597
+ return (x + (y - 1)) / y;
598
+ }
599
+
600
+ static __OCC_INLINE int __occRoundUp(int x, int y)
601
+ {
602
+ return y * __occDivideRoundUp(x, y);
603
+ }
604
+
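+ /* Worked example (illustrative, not part of the original header): for a
+  * 250-thread block and a warp size of 32,
+  *
+  *     __occDivideRoundUp(250, 32) == 8     // warps needed per block
+  *     __occRoundUp(250, 32)       == 256   // threads after warp-granularity round-up
+  *     __occMin(8, 12)             == 8
+  */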
605
+ //////////////////////////////////////////
606
+ // Architectural Properties //
607
+ //////////////////////////////////////////
608
+
609
+ /**
610
+ * Granularity of shared memory allocation
611
+ */
612
+ static __OCC_INLINE cudaOccError cudaOccSMemAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
613
+ {
614
+ int value;
615
+
616
+ switch(properties->computeMajor) {
617
+ case 3:
618
+ case 5:
619
+ case 6:
620
+ case 7:
621
+ value = 256;
622
+ break;
623
+ case 8:
624
+ case 9:
625
+ value = 128;
626
+ break;
627
+ default:
628
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
629
+ }
630
+
631
+ *limit = value;
632
+
633
+ return CUDA_OCC_SUCCESS;
634
+ }
635
+
636
+ /**
637
+ * Maximum number of registers per thread
638
+ */
639
+ static __OCC_INLINE cudaOccError cudaOccRegAllocationMaxPerThread(int *limit, const cudaOccDeviceProp *properties)
640
+ {
641
+ int value;
642
+
643
+ switch(properties->computeMajor) {
644
+ case 3:
645
+ case 5:
646
+ case 6:
647
+ value = 255;
648
+ break;
649
+ case 7:
650
+ case 8:
651
+ case 9:
652
+ value = 256;
653
+ break;
654
+ default:
655
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
656
+ }
657
+
658
+ *limit = value;
659
+
660
+ return CUDA_OCC_SUCCESS;
661
+ }
662
+
663
+ /**
664
+ * Granularity of register allocation
665
+ */
666
+ static __OCC_INLINE cudaOccError cudaOccRegAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
667
+ {
668
+ int value;
669
+
670
+ switch(properties->computeMajor) {
671
+ case 3:
672
+ case 5:
673
+ case 6:
674
+ case 7:
675
+ case 8:
676
+ case 9:
677
+ value = 256;
678
+ break;
679
+ default:
680
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
681
+ }
682
+
683
+ *limit = value;
684
+
685
+ return CUDA_OCC_SUCCESS;
686
+ }
687
+
688
+ /**
689
+ * Number of sub-partitions
690
+ */
691
+ static __OCC_INLINE cudaOccError cudaOccSubPartitionsPerMultiprocessor(int *limit, const cudaOccDeviceProp *properties)
692
+ {
693
+ int value;
694
+
695
+ switch(properties->computeMajor) {
696
+ case 3:
697
+ case 5:
698
+ case 7:
699
+ case 8:
700
+ case 9:
701
+ value = 4;
702
+ break;
703
+ case 6:
704
+ value = properties->computeMinor ? 4 : 2;
705
+ break;
706
+ default:
707
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
708
+ }
709
+
710
+ *limit = value;
711
+
712
+ return CUDA_OCC_SUCCESS;
713
+ }
714
+
715
+
716
+ /**
717
+ * Maximum number of blocks that can run simultaneously on a multiprocessor
718
+ */
719
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerMultiprocessor(int* limit, const cudaOccDeviceProp *properties)
720
+ {
721
+ int value;
722
+
723
+ switch(properties->computeMajor) {
724
+ case 3:
725
+ value = 16;
726
+ break;
727
+ case 5:
728
+ case 6:
729
+ value = 32;
730
+ break;
731
+ case 7: {
732
+ int isTuring = properties->computeMinor == 5;
733
+ value = (isTuring) ? 16 : 32;
734
+ break;
735
+ }
736
+ case 8:
737
+ if (properties->computeMinor == 0) {
738
+ value = 32;
739
+ }
740
+ else if (properties->computeMinor == 9) {
741
+ value = 24;
742
+ }
743
+ else {
744
+ value = 16;
745
+ }
746
+ break;
747
+ case 9:
748
+ value = 32;
749
+ break;
750
+ default:
751
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
752
+ }
753
+
754
+ *limit = value;
755
+
756
+ return CUDA_OCC_SUCCESS;
757
+ }
758
+
759
+ /**
760
+ * Align up shared memory based on compute major configurations
761
+ */
762
+ static __OCC_INLINE cudaOccError cudaOccAlignUpShmemSizeVoltaPlus(size_t *shMemSize, const cudaOccDeviceProp *properties)
763
+ {
764
+ // Volta and Turing have shared L1 cache / shared memory, and support cache
765
+ // configuration to trade one for the other. These values are needed to
766
+ // map carveout config ratio to the next available architecture size
767
+ size_t size = *shMemSize;
768
+
769
+ switch (properties->computeMajor) {
770
+ case 7: {
771
+ // Turing supports 32KB and 64KB shared mem.
772
+ int isTuring = properties->computeMinor == 5;
773
+ if (isTuring) {
774
+ if (size <= 32 * 1024) {
775
+ *shMemSize = 32 * 1024;
776
+ }
777
+ else if (size <= 64 * 1024) {
778
+ *shMemSize = 64 * 1024;
779
+ }
780
+ else {
781
+ return CUDA_OCC_ERROR_INVALID_INPUT;
782
+ }
783
+ }
784
+ // Volta supports 0KB, 8KB, 16KB, 32KB, 64KB, and 96KB shared mem.
785
+ else {
786
+ if (size == 0) {
787
+ *shMemSize = 0;
788
+ }
789
+ else if (size <= 8 * 1024) {
790
+ *shMemSize = 8 * 1024;
791
+ }
792
+ else if (size <= 16 * 1024) {
793
+ *shMemSize = 16 * 1024;
794
+ }
795
+ else if (size <= 32 * 1024) {
796
+ *shMemSize = 32 * 1024;
797
+ }
798
+ else if (size <= 64 * 1024) {
799
+ *shMemSize = 64 * 1024;
800
+ }
801
+ else if (size <= 96 * 1024) {
802
+ *shMemSize = 96 * 1024;
803
+ }
804
+ else {
805
+ return CUDA_OCC_ERROR_INVALID_INPUT;
806
+ }
807
+ }
808
+ break;
809
+ }
810
+ case 8:
811
+ if (properties->computeMinor == 0 || properties->computeMinor == 7) {
812
+ if (size == 0) {
813
+ *shMemSize = 0;
814
+ }
815
+ else if (size <= 8 * 1024) {
816
+ *shMemSize = 8 * 1024;
817
+ }
818
+ else if (size <= 16 * 1024) {
819
+ *shMemSize = 16 * 1024;
820
+ }
821
+ else if (size <= 32 * 1024) {
822
+ *shMemSize = 32 * 1024;
823
+ }
824
+ else if (size <= 64 * 1024) {
825
+ *shMemSize = 64 * 1024;
826
+ }
827
+ else if (size <= 100 * 1024) {
828
+ *shMemSize = 100 * 1024;
829
+ }
830
+ else if (size <= 132 * 1024) {
831
+ *shMemSize = 132 * 1024;
832
+ }
833
+ else if (size <= 164 * 1024) {
834
+ *shMemSize = 164 * 1024;
835
+ }
836
+ else {
837
+ return CUDA_OCC_ERROR_INVALID_INPUT;
838
+ }
839
+ }
840
+ else {
841
+ if (size == 0) {
842
+ *shMemSize = 0;
843
+ }
844
+ else if (size <= 8 * 1024) {
845
+ *shMemSize = 8 * 1024;
846
+ }
847
+ else if (size <= 16 * 1024) {
848
+ *shMemSize = 16 * 1024;
849
+ }
850
+ else if (size <= 32 * 1024) {
851
+ *shMemSize = 32 * 1024;
852
+ }
853
+ else if (size <= 64 * 1024) {
854
+ *shMemSize = 64 * 1024;
855
+ }
856
+ else if (size <= 100 * 1024) {
857
+ *shMemSize = 100 * 1024;
858
+ }
859
+ else {
860
+ return CUDA_OCC_ERROR_INVALID_INPUT;
861
+ }
862
+ }
863
+ break;
864
+ case 9: {
865
+ if (size == 0) {
866
+ *shMemSize = 0;
867
+ }
868
+ else if (size <= 8 * 1024) {
869
+ *shMemSize = 8 * 1024;
870
+ }
871
+ else if (size <= 16 * 1024) {
872
+ *shMemSize = 16 * 1024;
873
+ }
874
+ else if (size <= 32 * 1024) {
875
+ *shMemSize = 32 * 1024;
876
+ }
877
+ else if (size <= 64 * 1024) {
878
+ *shMemSize = 64 * 1024;
879
+ }
880
+ else if (size <= 100 * 1024) {
881
+ *shMemSize = 100 * 1024;
882
+ }
883
+ else if (size <= 132 * 1024) {
884
+ *shMemSize = 132 * 1024;
885
+ }
886
+ else if (size <= 164 * 1024) {
887
+ *shMemSize = 164 * 1024;
888
+ }
889
+ else if (size <= 196 * 1024) {
890
+ *shMemSize = 196 * 1024;
891
+ }
892
+ else if (size <= 228 * 1024) {
893
+ *shMemSize = 228 * 1024;
894
+ }
895
+ else {
896
+ return CUDA_OCC_ERROR_INVALID_INPUT;
897
+ }
898
+ break;
899
+ }
900
+ default:
901
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
902
+ }
903
+
904
+ return CUDA_OCC_SUCCESS;
905
+ }
906
+
907
+ /**
908
+ * Shared memory based on the new carveoutConfig API introduced with Volta
909
+ */
910
+ static __OCC_INLINE cudaOccError cudaOccSMemPreferenceVoltaPlus(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
911
+ {
912
+ cudaOccError status = CUDA_OCC_SUCCESS;
913
+ size_t preferenceShmemSize;
914
+
915
+ // CUDA 9.0 introduces a new API to set shared memory - L1 configuration on supported
916
+ // devices. This preference will take precedence over the older cacheConfig setting.
917
+ // Map cacheConfig to its effective preference value.
918
+ int effectivePreference = state->carveoutConfig;
919
+ if ((effectivePreference < SHAREDMEM_CARVEOUT_DEFAULT) || (effectivePreference > SHAREDMEM_CARVEOUT_MAX_SHARED)) {
920
+ return CUDA_OCC_ERROR_INVALID_INPUT;
921
+ }
922
+
923
+ if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
924
+ switch (state->cacheConfig)
925
+ {
926
+ case CACHE_PREFER_L1:
927
+ effectivePreference = SHAREDMEM_CARVEOUT_MAX_L1;
928
+ break;
929
+ case CACHE_PREFER_SHARED:
930
+ effectivePreference = SHAREDMEM_CARVEOUT_MAX_SHARED;
931
+ break;
932
+ case CACHE_PREFER_EQUAL:
933
+ effectivePreference = SHAREDMEM_CARVEOUT_HALF;
934
+ break;
935
+ default:
936
+ effectivePreference = SHAREDMEM_CARVEOUT_DEFAULT;
937
+ break;
938
+ }
939
+ }
940
+
941
+ if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
942
+ preferenceShmemSize = properties->sharedMemPerMultiprocessor;
943
+ }
944
+ else {
945
+ preferenceShmemSize = (size_t) (effectivePreference * properties->sharedMemPerMultiprocessor) / 100;
946
+ }
947
+
948
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&preferenceShmemSize, properties);
949
+ *limit = preferenceShmemSize;
950
+ return status;
951
+ }
952
+
953
+ /**
954
+ * Shared memory based on the cacheConfig
955
+ */
956
+ static __OCC_INLINE cudaOccError cudaOccSMemPreference(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
957
+ {
958
+ size_t bytes = 0;
959
+ size_t sharedMemPerMultiprocessorHigh = properties->sharedMemPerMultiprocessor;
960
+ cudaOccCacheConfig cacheConfig = state->cacheConfig;
961
+
962
+ // Kepler has shared L1 cache / shared memory, and supports cache
963
+ // configuration to trade one for the other. These values are needed to
964
+ // calculate the correct shared memory size for user requested cache
965
+ // configuration.
966
+ //
967
+ size_t minCacheSize = 16384;
968
+ size_t maxCacheSize = 49152;
969
+ size_t cacheAndSharedTotal = sharedMemPerMultiprocessorHigh + minCacheSize;
970
+ size_t sharedMemPerMultiprocessorLow = cacheAndSharedTotal - maxCacheSize;
971
+
972
+ switch (properties->computeMajor) {
973
+ case 3:
974
+ // Kepler supports 16KB, 32KB, or 48KB partitions for L1. The rest
975
+ // is shared memory.
976
+ //
977
+ switch (cacheConfig) {
978
+ default :
979
+ case CACHE_PREFER_NONE:
980
+ case CACHE_PREFER_SHARED:
981
+ bytes = sharedMemPerMultiprocessorHigh;
982
+ break;
983
+ case CACHE_PREFER_L1:
984
+ bytes = sharedMemPerMultiprocessorLow;
985
+ break;
986
+ case CACHE_PREFER_EQUAL:
987
+ // Equal is the mid-point between high and low. It should be
988
+ // equivalent to low + 16KB.
989
+ //
990
+ bytes = (sharedMemPerMultiprocessorHigh + sharedMemPerMultiprocessorLow) / 2;
991
+ break;
992
+ }
993
+ break;
994
+ case 5:
995
+ case 6:
996
+ // Maxwell and Pascal have dedicated shared memory.
997
+ //
998
+ bytes = sharedMemPerMultiprocessorHigh;
999
+ break;
1000
+ default:
1001
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
1002
+ }
1003
+
1004
+ *limit = bytes;
1005
+
1006
+ return CUDA_OCC_SUCCESS;
1007
+ }
1008
+
1009
+ /**
1010
+ * Shared memory based on config requested by User
1011
+ */
1012
+ static __OCC_INLINE cudaOccError cudaOccSMemPerMultiprocessor(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
1013
+ {
1014
+ // Volta introduces a new API that allows for shared memory carveout preference. Because it is a shared memory preference,
1015
+ // it is handled separately from the cache config preference.
1016
+ if (properties->computeMajor >= 7) {
1017
+ return cudaOccSMemPreferenceVoltaPlus(limit, properties, state);
1018
+ }
1019
+ return cudaOccSMemPreference(limit, properties, state);
1020
+ }
1021
+
1022
+ /**
1023
+ * Return the per block shared memory limit based on function config
1024
+ */
1025
+ static __OCC_INLINE cudaOccError cudaOccSMemPerBlock(size_t *limit, const cudaOccDeviceProp *properties, cudaOccFuncShmemConfig shmemLimitConfig, size_t smemPerCta)
1026
+ {
1027
+ switch (properties->computeMajor) {
1028
+ case 2:
1029
+ case 3:
1030
+ case 4:
1031
+ case 5:
1032
+ case 6:
1033
+ *limit = properties->sharedMemPerBlock;
1034
+ break;
1035
+ case 7:
1036
+ case 8:
1037
+ case 9:
1038
+ switch (shmemLimitConfig) {
1039
+ default:
1040
+ case FUNC_SHMEM_LIMIT_DEFAULT:
1041
+ *limit = properties->sharedMemPerBlock;
1042
+ break;
1043
+ case FUNC_SHMEM_LIMIT_OPTIN:
1044
+ if (smemPerCta > properties->sharedMemPerBlock) {
1045
+ *limit = properties->sharedMemPerBlockOptin;
1046
+ }
1047
+ else {
1048
+ *limit = properties->sharedMemPerBlock;
1049
+ }
1050
+ break;
1051
+ }
1052
+ break;
1053
+ default:
1054
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
1055
+ }
1056
+
1057
+ // Starting with Ampere, the CUDA driver reserves additional shared memory per block
1058
+ if (properties->computeMajor >= 8) {
1059
+ *limit += properties->reservedSharedMemPerBlock;
1060
+ }
1061
+
1062
+ return CUDA_OCC_SUCCESS;
1063
+ }
1064
+
1065
+ /**
1066
+ * Partitioned global caching mode support
1067
+ */
1068
+ static __OCC_INLINE cudaOccError cudaOccPartitionedGlobalCachingModeSupport(cudaOccPartitionedGCSupport *limit, const cudaOccDeviceProp *properties)
1069
+ {
1070
+ *limit = PARTITIONED_GC_NOT_SUPPORTED;
1071
+
1072
+ if ((properties->computeMajor == 5 && (properties->computeMinor == 2 || properties->computeMinor == 3)) ||
1073
+ properties->computeMajor == 6) {
1074
+ *limit = PARTITIONED_GC_SUPPORTED;
1075
+ }
1076
+
1077
+ if (properties->computeMajor == 6 && properties->computeMinor == 0) {
1078
+ *limit = PARTITIONED_GC_NOT_SUPPORTED;
1079
+ }
1080
+
1081
+ return CUDA_OCC_SUCCESS;
1082
+ }
1083
+
1084
+ ///////////////////////////////////////////////
1085
+ // User Input Sanity //
1086
+ ///////////////////////////////////////////////
1087
+
1088
+ static __OCC_INLINE cudaOccError cudaOccDevicePropCheck(const cudaOccDeviceProp *properties)
1089
+ {
1090
+ // Verify device properties
1091
+ //
1092
+ // Each of these limits must be a positive number.
1093
+ //
1094
+ // Compute capability is checked during the occupancy calculation
1095
+ //
1096
+ if (properties->maxThreadsPerBlock <= 0 ||
1097
+ properties->maxThreadsPerMultiprocessor <= 0 ||
1098
+ properties->regsPerBlock <= 0 ||
1099
+ properties->regsPerMultiprocessor <= 0 ||
1100
+ properties->warpSize <= 0 ||
1101
+ properties->sharedMemPerBlock <= 0 ||
1102
+ properties->sharedMemPerMultiprocessor <= 0 ||
1103
+ properties->numSms <= 0) {
1104
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1105
+ }
1106
+
1107
+ return CUDA_OCC_SUCCESS;
1108
+ }
1109
+
1110
+ static __OCC_INLINE cudaOccError cudaOccFuncAttributesCheck(const cudaOccFuncAttributes *attributes)
1111
+ {
1112
+ // Verify function attributes
1113
+ //
1114
+ if (attributes->maxThreadsPerBlock <= 0 ||
1115
+ attributes->numRegs < 0) { // Compiler may choose not to use
1116
+ // any register (empty kernels,
1117
+ // etc.)
1118
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1119
+ }
1120
+
1121
+ return CUDA_OCC_SUCCESS;
1122
+ }
1123
+
1124
+ static __OCC_INLINE cudaOccError cudaOccDeviceStateCheck(const cudaOccDeviceState *state)
1125
+ {
1126
+ (void)state; // silence unused-variable warning
1127
+ // Placeholder
1128
+ //
1129
+
1130
+ return CUDA_OCC_SUCCESS;
1131
+ }
1132
+
1133
+ static __OCC_INLINE cudaOccError cudaOccInputCheck(
1134
+ const cudaOccDeviceProp *properties,
1135
+ const cudaOccFuncAttributes *attributes,
1136
+ const cudaOccDeviceState *state)
1137
+ {
1138
+ cudaOccError status = CUDA_OCC_SUCCESS;
1139
+
1140
+ status = cudaOccDevicePropCheck(properties);
1141
+ if (status != CUDA_OCC_SUCCESS) {
1142
+ return status;
1143
+ }
1144
+
1145
+ status = cudaOccFuncAttributesCheck(attributes);
1146
+ if (status != CUDA_OCC_SUCCESS) {
1147
+ return status;
1148
+ }
1149
+
1150
+ status = cudaOccDeviceStateCheck(state);
1151
+ if (status != CUDA_OCC_SUCCESS) {
1152
+ return status;
1153
+ }
1154
+
1155
+ return status;
1156
+ }
1157
+
1158
+ ///////////////////////////////////////////////
1159
+ // Occupancy calculation Functions //
1160
+ ///////////////////////////////////////////////
1161
+
1162
+ static __OCC_INLINE cudaOccPartitionedGCConfig cudaOccPartitionedGCExpected(
1163
+ const cudaOccDeviceProp *properties,
1164
+ const cudaOccFuncAttributes *attributes)
1165
+ {
1166
+ cudaOccPartitionedGCSupport gcSupport;
1167
+ cudaOccPartitionedGCConfig gcConfig;
1168
+
1169
+ cudaOccPartitionedGlobalCachingModeSupport(&gcSupport, properties);
1170
+
1171
+ gcConfig = attributes->partitionedGCConfig;
1172
+
1173
+ if (gcSupport == PARTITIONED_GC_NOT_SUPPORTED) {
1174
+ gcConfig = PARTITIONED_GC_OFF;
1175
+ }
1176
+
1177
+ return gcConfig;
1178
+ }
1179
+
1180
+ // Warp limit
1181
+ //
1182
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMWarpsLimit(
1183
+ int *limit,
1184
+ cudaOccPartitionedGCConfig gcConfig,
1185
+ const cudaOccDeviceProp *properties,
1186
+ const cudaOccFuncAttributes *attributes,
1187
+ int blockSize)
1188
+ {
1189
+ cudaOccError status = CUDA_OCC_SUCCESS;
1190
+ int maxWarpsPerSm;
1191
+ int warpsAllocatedPerCTA;
1192
+ int maxBlocks;
1193
+ (void)attributes; // silence unused-variable warning
1194
+
1195
+ if (blockSize > properties->maxThreadsPerBlock) {
1196
+ maxBlocks = 0;
1197
+ }
1198
+ else {
1199
+ maxWarpsPerSm = properties->maxThreadsPerMultiprocessor / properties->warpSize;
1200
+ warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
1201
+ maxBlocks = 0;
1202
+
1203
+ if (gcConfig != PARTITIONED_GC_OFF) {
1204
+ int maxBlocksPerSmPartition;
1205
+ int maxWarpsPerSmPartition;
1206
+
1207
+ // If partitioned global caching is on, then a CTA can only use a SM
1208
+ // partition (a half SM), and thus a half of the warp slots
1209
+ // available per SM
1210
+ //
1211
+ maxWarpsPerSmPartition = maxWarpsPerSm / 2;
1212
+ maxBlocksPerSmPartition = maxWarpsPerSmPartition / warpsAllocatedPerCTA;
1213
+ maxBlocks = maxBlocksPerSmPartition * 2;
1214
+ }
1215
+ // On hardware that supports partitioned global caching, each half SM is
1216
+ // guaranteed to support at least 32 warps (maximum number of warps of a
1217
+ // CTA), so caching will not cause 0 occupancy due to insufficient warp
1218
+ // allocation slots.
1219
+ //
1220
+ else {
1221
+ maxBlocks = maxWarpsPerSm / warpsAllocatedPerCTA;
1222
+ }
1223
+ }
1224
+
1225
+ *limit = maxBlocks;
1226
+
1227
+ return status;
1228
+ }
1229
+
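+ /* Worked example (assumed, illustrative values): with
+  * maxThreadsPerMultiprocessor = 2048, warpSize = 32, blockSize = 256 and
+  * partitioned global caching off,
+  *
+  *     maxWarpsPerSm        = 2048 / 32      = 64
+  *     warpsAllocatedPerCTA = ceil(256 / 32) = 8
+  *     warp-limited CTAs/SM = 64 / 8         = 8
+  */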
1230
+ // Shared memory limit
1231
+ //
1232
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMSmemLimit(
1233
+ int *limit,
1234
+ cudaOccResult *result,
1235
+ const cudaOccDeviceProp *properties,
1236
+ const cudaOccFuncAttributes *attributes,
1237
+ const cudaOccDeviceState *state,
1238
+ int blockSize,
1239
+ size_t dynamicSmemSize)
1240
+ {
1241
+ cudaOccError status = CUDA_OCC_SUCCESS;
1242
+ int allocationGranularity;
1243
+ size_t userSmemPreference = 0;
1244
+ size_t totalSmemUsagePerCTA;
1245
+ size_t maxSmemUsagePerCTA;
1246
+ size_t smemAllocatedPerCTA;
1247
+ size_t staticSmemSize;
1248
+ size_t sharedMemPerMultiprocessor;
1249
+ size_t smemLimitPerCTA;
1250
+ int maxBlocks;
1251
+ int dynamicSmemSizeExceeded = 0;
1252
+ int totalSmemSizeExceeded = 0;
1253
+ (void)blockSize; // silence unused-variable warning
1254
+
1255
+ status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
1256
+ if (status != CUDA_OCC_SUCCESS) {
1257
+ return status;
1258
+ }
1259
+
1260
+ // Obtain the user preferred shared memory size. This setting is ignored if
1261
+ // user requests more shared memory than preferred.
1262
+ //
1263
+ status = cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
1264
+ if (status != CUDA_OCC_SUCCESS) {
1265
+ return status;
1266
+ }
1267
+
1268
+ staticSmemSize = attributes->sharedSizeBytes + properties->reservedSharedMemPerBlock;
1269
+ totalSmemUsagePerCTA = staticSmemSize + dynamicSmemSize;
1270
+ smemAllocatedPerCTA = __occRoundUp((int)totalSmemUsagePerCTA, (int)allocationGranularity);
1271
+
1272
+ maxSmemUsagePerCTA = staticSmemSize + attributes->maxDynamicSharedSizeBytes;
1273
+
1274
+ dynamicSmemSizeExceeded = 0;
1275
+ totalSmemSizeExceeded = 0;
1276
+
1277
+ // If the user has set a maximum dynamic shared memory size,
1278
+ // the dynamic shared memory of the current launch must not
1279
+ // exceed that limit
1280
+ if (attributes->shmemLimitConfig != FUNC_SHMEM_LIMIT_DEFAULT &&
1281
+ dynamicSmemSize > attributes->maxDynamicSharedSizeBytes) {
1282
+ dynamicSmemSizeExceeded = 1;
1283
+ }
1284
+
1285
+ status = cudaOccSMemPerBlock(&smemLimitPerCTA, properties, attributes->shmemLimitConfig, maxSmemUsagePerCTA);
1286
+ if (status != CUDA_OCC_SUCCESS) {
1287
+ return status;
1288
+ }
1289
+
1290
+ if (smemAllocatedPerCTA > smemLimitPerCTA) {
1291
+ totalSmemSizeExceeded = 1;
1292
+ }
1293
+
1294
+ if (dynamicSmemSizeExceeded || totalSmemSizeExceeded) {
1295
+ maxBlocks = 0;
1296
+ }
1297
+ else {
1298
+ // User requested shared memory limit is used as long as it is greater
1299
+ // than the total shared memory used per CTA, i.e. as long as at least
1300
+ // one CTA can be launched.
1301
+ if (userSmemPreference >= smemAllocatedPerCTA) {
1302
+ sharedMemPerMultiprocessor = userSmemPreference;
1303
+ }
1304
+ else {
1305
+ // On Volta+, user requested shared memory will limit occupancy
1306
+ // if it's less than shared memory per CTA. Otherwise, the
1307
+ // maximum shared memory limit is used.
1308
+ if (properties->computeMajor >= 7) {
1309
+ sharedMemPerMultiprocessor = smemAllocatedPerCTA;
1310
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&sharedMemPerMultiprocessor, properties);
1311
+ if (status != CUDA_OCC_SUCCESS) {
1312
+ return status;
1313
+ }
1314
+ }
1315
+ else {
1316
+ sharedMemPerMultiprocessor = properties->sharedMemPerMultiprocessor;
1317
+ }
1318
+ }
1319
+
1320
+ if (smemAllocatedPerCTA > 0) {
1321
+ maxBlocks = (int)(sharedMemPerMultiprocessor / smemAllocatedPerCTA);
1322
+ }
1323
+ else {
1324
+ maxBlocks = INT_MAX;
1325
+ }
1326
+ }
1327
+
1328
+ result->allocatedSharedMemPerBlock = smemAllocatedPerCTA;
1329
+
1330
+ *limit = maxBlocks;
1331
+
1332
+ return status;
1333
+ }
1334
+
1335
+ static __OCC_INLINE
1336
+ cudaOccError cudaOccMaxBlocksPerSMRegsLimit(
1337
+ int *limit,
1338
+ cudaOccPartitionedGCConfig *gcConfig,
1339
+ cudaOccResult *result,
1340
+ const cudaOccDeviceProp *properties,
1341
+ const cudaOccFuncAttributes *attributes,
1342
+ int blockSize)
1343
+ {
1344
+ cudaOccError status = CUDA_OCC_SUCCESS;
1345
+ int allocationGranularity;
1346
+ int warpsAllocatedPerCTA;
1347
+ int regsAllocatedPerCTA;
1348
+ int regsAssumedPerCTA;
1349
+ int regsPerWarp;
1350
+ int regsAllocatedPerWarp;
1351
+ int numSubPartitions;
1352
+ int numRegsPerSubPartition;
1353
+ int numWarpsPerSubPartition;
1354
+ int numWarpsPerSM;
1355
+ int maxBlocks;
1356
+ int maxRegsPerThread;
1357
+
1358
+ status = cudaOccRegAllocationGranularity(
1359
+ &allocationGranularity,
1360
+ properties);
1361
+ if (status != CUDA_OCC_SUCCESS) {
1362
+ return status;
1363
+ }
1364
+
1365
+ status = cudaOccRegAllocationMaxPerThread(
1366
+ &maxRegsPerThread,
1367
+ properties);
1368
+ if (status != CUDA_OCC_SUCCESS) {
1369
+ return status;
1370
+ }
1371
+
1372
+ status = cudaOccSubPartitionsPerMultiprocessor(&numSubPartitions, properties);
1373
+ if (status != CUDA_OCC_SUCCESS) {
1374
+ return status;
1375
+ }
1376
+
1377
+ warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
1378
+
1379
+ // GPUs of compute capability 2.x and higher allocate registers to warps
1380
+ //
1381
+ // Number of regs per warp is regs per thread x warp size, rounded up to
1382
+ // register allocation granularity
1383
+ //
1384
+ regsPerWarp = attributes->numRegs * properties->warpSize;
1385
+ regsAllocatedPerWarp = __occRoundUp(regsPerWarp, allocationGranularity);
1386
+ regsAllocatedPerCTA = regsAllocatedPerWarp * warpsAllocatedPerCTA;
1387
+
1388
+ // Hardware verifies if a launch fits the per-CTA register limit. For
1389
+ // historical reasons, the verification logic assumes register
1390
+ // allocations are made to all partitions simultaneously. Therefore, to
1391
+ // simulate the hardware check, the warp allocation needs to be rounded
1392
+ // up to the number of partitions.
1393
+ //
1394
+ regsAssumedPerCTA = regsAllocatedPerWarp * __occRoundUp(warpsAllocatedPerCTA, numSubPartitions);
1395
+
1396
+ if (properties->regsPerBlock < regsAssumedPerCTA || // Hardware check
1397
+ properties->regsPerBlock < regsAllocatedPerCTA || // Software check
1398
+ attributes->numRegs > maxRegsPerThread) { // Per thread limit check
1399
+ maxBlocks = 0;
1400
+ }
1401
+ else {
1402
+ if (regsAllocatedPerWarp > 0) {
1403
+ // Registers are allocated in each sub-partition. The max number
1404
+ // of warps that can fit on an SM is equal to the max number of
1405
+ // warps per sub-partition x number of sub-partitions.
1406
+ //
1407
+ numRegsPerSubPartition = properties->regsPerMultiprocessor / numSubPartitions;
1408
+ numWarpsPerSubPartition = numRegsPerSubPartition / regsAllocatedPerWarp;
1409
+
1410
+ maxBlocks = 0;
1411
+
1412
+ if (*gcConfig != PARTITIONED_GC_OFF) {
1413
+ int numSubPartitionsPerSmPartition;
1414
+ int numWarpsPerSmPartition;
1415
+ int maxBlocksPerSmPartition;
1416
+
1417
+ // If partitioned global caching is on, then a CTA can only
1418
+ // use a half SM, and thus a half of the registers available
1419
+ // per SM
1420
+ //
1421
+ numSubPartitionsPerSmPartition = numSubPartitions / 2;
1422
+ numWarpsPerSmPartition = numWarpsPerSubPartition * numSubPartitionsPerSmPartition;
1423
+ maxBlocksPerSmPartition = numWarpsPerSmPartition / warpsAllocatedPerCTA;
1424
+ maxBlocks = maxBlocksPerSmPartition * 2;
1425
+ }
1426
+
1427
+ // Try again if partitioned global caching is not enabled, or if
1428
+ // the CTA cannot fit on the SM with caching on (maxBlocks == 0). In the latter
1429
+ // case, the device will automatically turn off caching, except
1430
+ // if the user forces enablement via PARTITIONED_GC_ON_STRICT to calculate
1431
+ // occupancy and launch configuration.
1432
+ //
1433
+ if (maxBlocks == 0 && *gcConfig != PARTITIONED_GC_ON_STRICT) {
1434
+ // In case *gcConfig was PARTITIONED_GC_ON flip it OFF since
1435
+ // this is what it will be if we spread CTA across partitions.
1436
+ //
1437
+ *gcConfig = PARTITIONED_GC_OFF;
1438
+ numWarpsPerSM = numWarpsPerSubPartition * numSubPartitions;
1439
+ maxBlocks = numWarpsPerSM / warpsAllocatedPerCTA;
1440
+ }
1441
+ }
1442
+ else {
1443
+ maxBlocks = INT_MAX;
1444
+ }
1445
+ }
1446
+
1447
+
1448
+ result->allocatedRegistersPerBlock = regsAllocatedPerCTA;
1449
+
1450
+ *limit = maxBlocks;
1451
+
1452
+ return status;
1453
+ }
1454
+
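+ /* Worked example (assumed, illustrative values): with numRegs = 40,
+  * warpSize = 32, blockSize = 256, a register allocation granularity of 256,
+  * regsPerMultiprocessor = 65536, 4 sub-partitions and caching off,
+  *
+  *     regsAllocatedPerWarp     = roundUp(40 * 32, 256) = 1280
+  *     regsAllocatedPerCTA      = 1280 * 8 warps        = 10240
+  *     numWarpsPerSubPartition  = (65536 / 4) / 1280    = 12
+  *     register-limited CTAs/SM = (12 * 4) / 8          = 6
+  */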
1455
+ // Barrier limit
1456
+ //
1457
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMBlockBarrierLimit(
1458
+ int *limit,
1459
+ int ctaLimitBlocks,
1460
+ const cudaOccFuncAttributes *attributes)
1461
+ {
1462
+ cudaOccError status = CUDA_OCC_SUCCESS;
1463
+ int numBarriersAvailable = ctaLimitBlocks * 2;
1464
+ int numBarriersUsed = attributes->numBlockBarriers;
1465
+ int maxBlocks = INT_MAX;
1466
+
1467
+ if (numBarriersUsed) {
1468
+ maxBlocks = numBarriersAvailable / numBarriersUsed;
1469
+ }
1470
+
1471
+ *limit = maxBlocks;
1472
+
1473
+ return status;
1474
+ }
1475
+
1476
+ ///////////////////////////////////
1477
+ // API Implementations //
1478
+ ///////////////////////////////////
1479
+
1480
+ static __OCC_INLINE
1481
+ cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
1482
+ cudaOccResult *result,
1483
+ const cudaOccDeviceProp *properties,
1484
+ const cudaOccFuncAttributes *attributes,
1485
+ const cudaOccDeviceState *state,
1486
+ int blockSize,
1487
+ size_t dynamicSmemSize)
1488
+ {
1489
+ cudaOccError status = CUDA_OCC_SUCCESS;
1490
+ int ctaLimitWarps = 0;
1491
+ int ctaLimitBlocks = 0;
1492
+ int ctaLimitSMem = 0;
1493
+ int ctaLimitRegs = 0;
1494
+ int ctaLimitBars = 0;
1495
+ int ctaLimit = 0;
1496
+ unsigned int limitingFactors = 0;
1497
+
1498
+ cudaOccPartitionedGCConfig gcConfig = PARTITIONED_GC_OFF;
1499
+
1500
+ if (!result || !properties || !attributes || !state || blockSize <= 0) {
1501
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1502
+ }
1503
+
1504
+ ///////////////////////////
1505
+ // Check user input
1506
+ ///////////////////////////
1507
+
1508
+ status = cudaOccInputCheck(properties, attributes, state);
1509
+ if (status != CUDA_OCC_SUCCESS) {
1510
+ return status;
1511
+ }
1512
+
1513
+ ///////////////////////////
1514
+ // Initialization
1515
+ ///////////////////////////
1516
+
1517
+ gcConfig = cudaOccPartitionedGCExpected(properties, attributes);
1518
+
1519
+ ///////////////////////////
1520
+ // Compute occupancy
1521
+ ///////////////////////////
1522
+
1523
+ // Limits due to registers/SM
1524
+ // Also compute if partitioned global caching has to be turned off
1525
+ //
1526
+ status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegs, &gcConfig, result, properties, attributes, blockSize);
1527
+ if (status != CUDA_OCC_SUCCESS) {
1528
+ return status;
1529
+ }
1530
+
1531
+ // SMs on GP100 (6.0) have 2 subpartitions, while those on GP10x have 4.
1532
+ // As a result, an SM on GP100 may be able to run more CTAs than the one on GP10x.
1533
+ // For forward compatibility within Pascal family, if a function cannot run on GP10x (maxBlock == 0),
1534
+ // we do not let it run on any Pascal processor, even though it may be able to run on GP100.
1535
+ // Therefore, we check the occupancy on GP10x when it can run on GP100
1536
+ //
1537
+ if (properties->computeMajor == 6 && properties->computeMinor == 0 && ctaLimitRegs) {
1538
+ cudaOccDeviceProp propertiesGP10x;
1539
+ cudaOccPartitionedGCConfig gcConfigGP10x = gcConfig;
1540
+ int ctaLimitRegsGP10x = 0;
1541
+
1542
+ // Set up properties for GP10x
1543
+ memcpy(&propertiesGP10x, properties, sizeof(propertiesGP10x));
1544
+ propertiesGP10x.computeMinor = 1;
1545
+
1546
+ status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegsGP10x, &gcConfigGP10x, result, &propertiesGP10x, attributes, blockSize);
1547
+ if (status != CUDA_OCC_SUCCESS) {
1548
+ return status;
1549
+ }
1550
+
1551
+ if (ctaLimitRegsGP10x == 0) {
1552
+ ctaLimitRegs = 0;
1553
+ }
1554
+ }
1555
+
1556
+ // Limits due to warps/SM
1557
+ //
1558
+ status = cudaOccMaxBlocksPerSMWarpsLimit(&ctaLimitWarps, gcConfig, properties, attributes, blockSize);
1559
+ if (status != CUDA_OCC_SUCCESS) {
1560
+ return status;
1561
+ }
1562
+
1563
+ // Limits due to blocks/SM
1564
+ //
1565
+ status = cudaOccMaxBlocksPerMultiprocessor(&ctaLimitBlocks, properties);
1566
+ if (status != CUDA_OCC_SUCCESS) {
1567
+ return status;
1568
+ }
1569
+
1570
+ // Limits due to shared memory/SM
1571
+ //
1572
+ status = cudaOccMaxBlocksPerSMSmemLimit(&ctaLimitSMem, result, properties, attributes, state, blockSize, dynamicSmemSize);
1573
+ if (status != CUDA_OCC_SUCCESS) {
1574
+ return status;
1575
+ }
1576
+
1577
+ ///////////////////////////
1578
+ // Overall occupancy
1579
+ ///////////////////////////
1580
+
1581
+ // Overall limit is min() of limits due to above reasons
1582
+ //
1583
+ ctaLimit = __occMin(ctaLimitRegs, __occMin(ctaLimitSMem, __occMin(ctaLimitWarps, ctaLimitBlocks)));
1584
+
1585
+ // Determine occupancy limiting factors
1586
+ //
1587
+ if (ctaLimit == ctaLimitWarps) {
1588
+ limitingFactors |= OCC_LIMIT_WARPS;
1589
+ }
1590
+ if (ctaLimit == ctaLimitRegs) {
1591
+ limitingFactors |= OCC_LIMIT_REGISTERS;
1592
+ }
1593
+ if (ctaLimit == ctaLimitSMem) {
1594
+ limitingFactors |= OCC_LIMIT_SHARED_MEMORY;
1595
+ }
1596
+ if (ctaLimit == ctaLimitBlocks) {
1597
+ limitingFactors |= OCC_LIMIT_BLOCKS;
1598
+ }
1599
+
1600
+ // For Hopper onwards compute the limits to occupancy based on block barrier count
1601
+ //
1602
+ if (properties->computeMajor >= 9 && attributes->numBlockBarriers > 0) {
1603
+ // Limits due to barrier/SM
1604
+ //
1605
+ status = cudaOccMaxBlocksPerSMBlockBarrierLimit(&ctaLimitBars, ctaLimitBlocks, attributes);
1606
+ if (status != CUDA_OCC_SUCCESS) {
1607
+ return status;
1608
+ }
1609
+
1610
+ // Recompute overall limit based on barrier/SM
1611
+ //
1612
+ ctaLimit = __occMin(ctaLimitBars, ctaLimit);
1613
+
1614
+ // Determine if this is occupancy limiting factor
1615
+ //
1616
+ if (ctaLimit == ctaLimitBars) {
1617
+ limitingFactors |= OCC_LIMIT_BARRIERS;
1618
+ }
1619
+ }
1620
+ else {
1621
+ ctaLimitBars = INT_MAX;
1622
+ }
1623
+
1624
+ // Fill in the return values
1625
+ //
1626
+ result->limitingFactors = limitingFactors;
1627
+
1628
+ result->blockLimitRegs = ctaLimitRegs;
1629
+ result->blockLimitSharedMem = ctaLimitSMem;
1630
+ result->blockLimitWarps = ctaLimitWarps;
1631
+ result->blockLimitBlocks = ctaLimitBlocks;
1632
+ result->blockLimitBarriers = ctaLimitBars;
1633
+ result->partitionedGCConfig = gcConfig;
1634
+
1635
+ // Final occupancy
1636
+ result->activeBlocksPerMultiprocessor = ctaLimit;
1637
+
1638
+ return CUDA_OCC_SUCCESS;
1639
+ }
1640
+
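+ /* A usage sketch (not part of the original header). prop and attr are
+  * assumed to have been filled by the caller from the driver or runtime API;
+  * the 256-thread block size and 1024 bytes of dynamic shared memory are
+  * illustrative assumptions.
+  *
+  *     cudaOccDeviceProp     prop;   // filled by the caller
+  *     cudaOccFuncAttributes attr;   // filled by the caller
+  *     cudaOccDeviceState    state;  // default device state
+  *     cudaOccResult         result;
+  *
+  *     cudaOccError err = cudaOccMaxActiveBlocksPerMultiprocessor(
+  *         &result, &prop, &attr, &state, 256, 1024);
+  *     if (err == CUDA_OCC_SUCCESS) {
+  *         int activeBlocks = result.activeBlocksPerMultiprocessor;
+  *         // result.limitingFactors flags which of warps / registers /
+  *         // shared memory / blocks / barriers bound the result
+  *     }
+  */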
1641
+ static __OCC_INLINE
1642
+ cudaOccError cudaOccAvailableDynamicSMemPerBlock(
1643
+ size_t *bytesAvailable,
1644
+ const cudaOccDeviceProp *properties,
1645
+ const cudaOccFuncAttributes *attributes,
1646
+ const cudaOccDeviceState *state,
1647
+ int numBlocks,
1648
+ int blockSize)
1649
+ {
1650
+ int allocationGranularity;
1651
+ size_t smemLimitPerBlock;
1652
+ size_t smemAvailableForDynamic;
1653
+ size_t userSmemPreference = 0;
1654
+ size_t sharedMemPerMultiprocessor;
1655
+ cudaOccResult result;
1656
+ cudaOccError status = CUDA_OCC_SUCCESS;
1657
+
1658
+ if (numBlocks <= 0)
1659
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1660
+
1661
+ // First compute occupancy of potential kernel launch.
1662
+ //
1663
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(&result, properties, attributes, state, blockSize, 0);
1664
+ if (status != CUDA_OCC_SUCCESS) {
1665
+ return status;
1666
+ }
1667
+ // Check if occupancy is achievable given user requested number of blocks.
1668
+ //
1669
+ if (result.activeBlocksPerMultiprocessor < numBlocks) {
1670
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1671
+ }
1672
+
1673
+ status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
1674
+ if (status != CUDA_OCC_SUCCESS) {
1675
+ return status;
1676
+ }
1677
+
1678
+ // Return the per block shared memory limit based on function config.
1679
+ //
1680
+ status = cudaOccSMemPerBlock(&smemLimitPerBlock, properties, attributes->shmemLimitConfig, properties->sharedMemPerMultiprocessor);
1681
+ if (status != CUDA_OCC_SUCCESS) {
1682
+ return status;
1683
+ }
1684
+
1685
+ // If only a single block is needed per SM, the user preference can be ignored and the full SW
1686
+ // limit can be used as shared memory. Otherwise, if more than one block is needed, the user
1687
+ // preference sets the total limit of available shared memory.
1688
+ //
1689
+ cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
1690
+ if (numBlocks == 1) {
1691
+ sharedMemPerMultiprocessor = smemLimitPerBlock;
1692
+ }
1693
+ else {
1694
+ if (!userSmemPreference) {
1695
+ userSmemPreference = 1;
1696
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&userSmemPreference, properties);
1697
+ if (status != CUDA_OCC_SUCCESS) {
1698
+ return status;
1699
+ }
1700
+ }
1701
+ sharedMemPerMultiprocessor = userSmemPreference;
1702
+ }
1703
+
1704
+ // Compute the shared memory available to each block
1705
+ //
1706
+ smemAvailableForDynamic = sharedMemPerMultiprocessor / numBlocks;
1707
+ smemAvailableForDynamic = (smemAvailableForDynamic / allocationGranularity) * allocationGranularity;
1708
+
1709
+ // Cap shared memory
1710
+ //
1711
+ if (smemAvailableForDynamic > smemLimitPerBlock) {
1712
+ smemAvailableForDynamic = smemLimitPerBlock;
1713
+ }
1714
+
1715
+ // Now compute dynamic shared memory size
1716
+ smemAvailableForDynamic = smemAvailableForDynamic - attributes->sharedSizeBytes;
1717
+
1718
+ // Cap computed dynamic SM by user requested limit specified via cuFuncSetAttribute()
1719
+ //
1720
+ if (smemAvailableForDynamic > attributes->maxDynamicSharedSizeBytes)
1721
+ smemAvailableForDynamic = attributes->maxDynamicSharedSizeBytes;
1722
+
1723
+ *bytesAvailable = smemAvailableForDynamic;
1724
+ return CUDA_OCC_SUCCESS;
1725
+ }
1726
+
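+ /* A usage sketch (illustrative assumptions): query how much dynamic shared
+  * memory each block could still request if two 256-thread blocks are to
+  * remain resident per SM; prop, attr and state are assumed to be filled as
+  * in the sketch above.
+  *
+  *     size_t dynSmem = 0;
+  *     cudaOccError err = cudaOccAvailableDynamicSMemPerBlock(
+  *         &dynSmem, &prop, &attr, &state, 2, 256);
+  */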
1727
+ static __OCC_INLINE
1728
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
1729
+ int *minGridSize,
1730
+ int *blockSize,
1731
+ const cudaOccDeviceProp *properties,
1732
+ const cudaOccFuncAttributes *attributes,
1733
+ const cudaOccDeviceState *state,
1734
+ size_t (*blockSizeToDynamicSMemSize)(int),
1735
+ size_t dynamicSMemSize)
1736
+ {
1737
+ cudaOccError status = CUDA_OCC_SUCCESS;
1738
+ cudaOccResult result;
1739
+
1740
+ // Limits
1741
+ int occupancyLimit;
1742
+ int granularity;
1743
+ int blockSizeLimit;
1744
+
1745
+ // Recorded maximum
1746
+ int maxBlockSize = 0;
1747
+ int numBlocks = 0;
1748
+ int maxOccupancy = 0;
1749
+
1750
+ // Temporary
1751
+ int blockSizeToTryAligned;
1752
+ int blockSizeToTry;
1753
+ int blockSizeLimitAligned;
1754
+ int occupancyInBlocks;
1755
+ int occupancyInThreads;
1756
+
1757
+ ///////////////////////////
1758
+ // Check user input
1759
+ ///////////////////////////
1760
+
1761
+ if (!minGridSize || !blockSize || !properties || !attributes || !state) {
1762
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1763
+ }
1764
+
1765
+ status = cudaOccInputCheck(properties, attributes, state);
1766
+ if (status != CUDA_OCC_SUCCESS) {
1767
+ return status;
1768
+ }
1769
+
1770
+ /////////////////////////////////////////////////////////////////////////////////
1771
+ // Try each block size, and pick the block size with maximum occupancy
1772
+ /////////////////////////////////////////////////////////////////////////////////
1773
+
1774
+ occupancyLimit = properties->maxThreadsPerMultiprocessor;
1775
+ granularity = properties->warpSize;
1776
+
1777
+ blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
1778
+ blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
1779
+
1780
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1781
+ blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
1782
+
1783
+ // Ignore dynamicSMemSize if the user provides a mapping
1784
+ //
1785
+ if (blockSizeToDynamicSMemSize) {
1786
+ dynamicSMemSize = (*blockSizeToDynamicSMemSize)(blockSizeToTry);
1787
+ }
1788
+
1789
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(
1790
+ &result,
1791
+ properties,
1792
+ attributes,
1793
+ state,
1794
+ blockSizeToTry,
1795
+ dynamicSMemSize);
1796
+
1797
+ if (status != CUDA_OCC_SUCCESS) {
1798
+ return status;
1799
+ }
1800
+
1801
+ occupancyInBlocks = result.activeBlocksPerMultiprocessor;
1802
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1803
+
1804
+ if (occupancyInThreads > maxOccupancy) {
1805
+ maxBlockSize = blockSizeToTry;
1806
+ numBlocks = occupancyInBlocks;
1807
+ maxOccupancy = occupancyInThreads;
1808
+ }
1809
+
1810
+ // Early out if we have reached the maximum
1811
+ //
1812
+ if (occupancyLimit == maxOccupancy) {
1813
+ break;
1814
+ }
1815
+ }
1816
+
1817
+ ///////////////////////////
1818
+ // Return best available
1819
+ ///////////////////////////
1820
+
1821
+ // Suggested min grid size to achieve a full machine launch
1822
+ //
1823
+ *minGridSize = numBlocks * properties->numSms;
1824
+ *blockSize = maxBlockSize;
1825
+
1826
+ return status;
1827
+ }
1828
+
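+ /* A usage sketch (illustrative assumptions): pick an occupancy-maximizing
+  * block size for a kernel that uses no dynamic shared memory, then derive a
+  * grid size for N elements; prop, attr, state and N are assumed to be
+  * provided by the caller.
+  *
+  *     int minGridSize = 0, blockSize = 0;
+  *     cudaOccError err = cudaOccMaxPotentialOccupancyBlockSize(
+  *         &minGridSize, &blockSize, &prop, &attr, &state, NULL, 0);
+  *     if (err == CUDA_OCC_SUCCESS) {
+  *         int gridSize = (N + blockSize - 1) / blockSize;
+  *         // launching at least minGridSize blocks fills the machine
+  *     }
+  */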
1829
+
1830
+ #if defined(__cplusplus)
1831
+
1832
+ namespace {
1833
+
1834
+ __OCC_INLINE
1835
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
1836
+ int *minGridSize,
1837
+ int *blockSize,
1838
+ const cudaOccDeviceProp *properties,
1839
+ const cudaOccFuncAttributes *attributes,
1840
+ const cudaOccDeviceState *state,
1841
+ size_t dynamicSMemSize)
1842
+ {
1843
+ return cudaOccMaxPotentialOccupancyBlockSize(
1844
+ minGridSize,
1845
+ blockSize,
1846
+ properties,
1847
+ attributes,
1848
+ state,
1849
+ NULL,
1850
+ dynamicSMemSize);
1851
+ }
1852
+
1853
+ template <typename UnaryFunction>
1854
+ __OCC_INLINE
1855
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
1856
+ int *minGridSize,
1857
+ int *blockSize,
1858
+ const cudaOccDeviceProp *properties,
1859
+ const cudaOccFuncAttributes *attributes,
1860
+ const cudaOccDeviceState *state,
1861
+ UnaryFunction blockSizeToDynamicSMemSize)
1862
+ {
1863
+ cudaOccError status = CUDA_OCC_SUCCESS;
1864
+ cudaOccResult result;
1865
+
1866
+ // Limits
1867
+ int occupancyLimit;
1868
+ int granularity;
1869
+ int blockSizeLimit;
1870
+
1871
+ // Recorded maximum
1872
+ int maxBlockSize = 0;
1873
+ int numBlocks = 0;
1874
+ int maxOccupancy = 0;
1875
+
1876
+ // Temporary
1877
+ int blockSizeToTryAligned;
1878
+ int blockSizeToTry;
1879
+ int blockSizeLimitAligned;
1880
+ int occupancyInBlocks;
1881
+ int occupancyInThreads;
1882
+ size_t dynamicSMemSize;
1883
+
1884
+ ///////////////////////////
1885
+ // Check user input
1886
+ ///////////////////////////
1887
+
1888
+ if (!minGridSize || !blockSize || !properties || !attributes || !state) {
1889
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1890
+ }
1891
+
1892
+ status = cudaOccInputCheck(properties, attributes, state);
1893
+ if (status != CUDA_OCC_SUCCESS) {
1894
+ return status;
1895
+ }
1896
+
1897
+ /////////////////////////////////////////////////////////////////////////////////
1898
+ // Try each block size, and pick the block size with maximum occupancy
1899
+ /////////////////////////////////////////////////////////////////////////////////
1900
+
1901
+ occupancyLimit = properties->maxThreadsPerMultiprocessor;
1902
+ granularity = properties->warpSize;
1903
+ blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
1904
+ blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
1905
+
1906
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1907
+ blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
1908
+
1909
+ dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry);
1910
+
1911
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(
1912
+ &result,
1913
+ properties,
1914
+ attributes,
1915
+ state,
1916
+ blockSizeToTry,
1917
+ dynamicSMemSize);
1918
+
1919
+ if (status != CUDA_OCC_SUCCESS) {
1920
+ return status;
1921
+ }
1922
+
1923
+ occupancyInBlocks = result.activeBlocksPerMultiprocessor;
1924
+
1925
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1926
+
1927
+ if (occupancyInThreads > maxOccupancy) {
1928
+ maxBlockSize = blockSizeToTry;
1929
+ numBlocks = occupancyInBlocks;
1930
+ maxOccupancy = occupancyInThreads;
1931
+ }
1932
+
1933
+ // Early out if we have reached the maximum
1934
+ //
1935
+ if (occupancyLimit == maxOccupancy) {
1936
+ break;
1937
+ }
1938
+ }
1939
+
1940
+ ///////////////////////////
1941
+ // Return best available
1942
+ ///////////////////////////
1943
+
1944
+ // Suggested min grid size to achieve a full machine launch
1945
+ //
1946
+ *minGridSize = numBlocks * properties->numSms;
1947
+ *blockSize = maxBlockSize;
1948
+
1949
+ return status;
1950
+ }
1951
+
1952
+ } // namespace anonymous
1953
+
1954
+ #endif /*__cplusplus */
1955
+
1956
+ #undef __OCC_INLINE
1957
+
1958
+ #endif /*__cuda_occupancy_h__*/
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h ADDED
@@ -0,0 +1,224 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_H_
51
+ # define _CUDA_PIPELINE_H_
52
+
53
+ # include "cuda_pipeline_primitives.h"
54
+
55
+ # if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
56
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
57
+ -std=c++11 compiler option.
58
+ # endif
59
+
60
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
61
+ # include "cuda_awbarrier.h"
62
+ # endif
63
+
64
+ // Integration with libcu++'s cuda::barrier<cuda::thread_scope_block>.
65
+
66
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
67
+ # if defined(_LIBCUDACXX_CUDA_ABI_VERSION)
68
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION
69
+ # else
70
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4
71
+ # endif
72
+
73
+ # define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y
74
+ # define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y)
75
+ # define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION)
76
+
77
+ namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE {
78
+ struct __block_scope_barrier_base;
79
+ }}
80
+
81
+ # endif
82
+
83
+ _CUDA_PIPELINE_BEGIN_NAMESPACE
84
+
85
+ template<size_t N, typename T>
86
+ _CUDA_PIPELINE_QUALIFIER
87
+ auto segment(T* ptr) -> T(*)[N];
88
+
89
+ class pipeline {
90
+ public:
91
+ pipeline(const pipeline&) = delete;
92
+ pipeline(pipeline&&) = delete;
93
+ pipeline& operator=(const pipeline&) = delete;
94
+ pipeline& operator=(pipeline&&) = delete;
95
+
96
+ _CUDA_PIPELINE_QUALIFIER pipeline();
97
+ _CUDA_PIPELINE_QUALIFIER size_t commit();
98
+ _CUDA_PIPELINE_QUALIFIER void commit_and_wait();
99
+ _CUDA_PIPELINE_QUALIFIER void wait(size_t batch);
100
+ template<unsigned N>
101
+ _CUDA_PIPELINE_QUALIFIER void wait_prior();
102
+
103
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
104
+ _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier);
105
+ _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier);
106
+ # endif
107
+
108
+ private:
109
+ size_t current_batch;
110
+ };
111
+
112
+ template<class T>
113
+ _CUDA_PIPELINE_QUALIFIER
114
+ void memcpy_async(T& dst, const T& src, pipeline& pipe);
115
+
116
+ template<class T, size_t DstN, size_t SrcN>
117
+ _CUDA_PIPELINE_QUALIFIER
118
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe);
119
+
120
+ template<size_t N, typename T>
121
+ _CUDA_PIPELINE_QUALIFIER
122
+ auto segment(T* ptr) -> T(*)[N]
123
+ {
124
+ return (T(*)[N])ptr;
125
+ }
126
+
127
+ _CUDA_PIPELINE_QUALIFIER
128
+ pipeline::pipeline()
129
+ : current_batch(0)
130
+ {
131
+ }
132
+
133
+ _CUDA_PIPELINE_QUALIFIER
134
+ size_t pipeline::commit()
135
+ {
136
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
137
+ return this->current_batch++;
138
+ }
139
+
140
+ _CUDA_PIPELINE_QUALIFIER
141
+ void pipeline::commit_and_wait()
142
+ {
143
+ (void)pipeline::commit();
144
+ pipeline::wait_prior<0>();
145
+ }
146
+
147
+ _CUDA_PIPELINE_QUALIFIER
148
+ void pipeline::wait(size_t batch)
149
+ {
150
+ const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0;
151
+
152
+ switch (prior) {
153
+ case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break;
154
+ case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break;
155
+ case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break;
156
+ case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break;
157
+ case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break;
158
+ case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break;
159
+ case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break;
160
+ case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break;
161
+ default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break;
162
+ }
163
+ }
164
+
165
+ template<unsigned N>
166
+ _CUDA_PIPELINE_QUALIFIER
167
+ void pipeline::wait_prior()
168
+ {
169
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<N>();
170
+ }
171
+
172
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
173
+ _CUDA_PIPELINE_QUALIFIER
174
+ void pipeline::arrive_on(awbarrier& barrier)
175
+ {
176
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier);
177
+ }
178
+
179
+ _CUDA_PIPELINE_QUALIFIER
180
+ void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier)
181
+ {
182
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast<uint64_t *>(&barrier));
183
+ }
184
+ # endif
185
+
186
+ template<class T>
187
+ _CUDA_PIPELINE_QUALIFIER
188
+ void memcpy_async(T& dst, const T& src, pipeline& pipe)
189
+ {
190
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&src) & (alignof(T) - 1)));
191
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&dst) & (alignof(T) - 1)));
192
+
193
+ if (__is_trivially_copyable(T)) {
194
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed<sizeof(T), alignof(T)>(
195
+ reinterpret_cast<void*>(&dst), reinterpret_cast<const void*>(&src));
196
+ } else {
197
+ dst = src;
198
+ }
199
+ }
200
+
201
+ template<class T, size_t DstN, size_t SrcN>
202
+ _CUDA_PIPELINE_QUALIFIER
203
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe)
204
+ {
205
+ constexpr size_t dst_size = sizeof(*dst);
206
+ constexpr size_t src_size = sizeof(*src);
207
+ static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size.");
208
+ static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size.");
209
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (dst_size - 1)));
210
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (dst_size - 1)));
211
+
212
+ if (__is_trivially_copyable(T)) {
213
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict<sizeof(*dst), sizeof(*src)>(
214
+ reinterpret_cast<void*>(*dst), reinterpret_cast<const void*>(*src));
215
+ } else {
216
+ for (size_t i = 0; i < DstN; ++i) {
217
+ (*dst)[i] = (i < SrcN) ? (*src)[i] : T();
218
+ }
219
+ }
220
+ }
221
+
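+ /* A minimal device-side sketch (not part of the original header): stage one
+  * element per thread from global to shared memory through a pipeline. The
+  * kernel name, the 256-element shared array and the assumption that
+  * blockDim.x == 256 are illustrative.
+  *
+  *     __global__ void stage_and_reverse(const int* in, int* out) {
+  *         __shared__ int staged[256];
+  *         nvcuda::experimental::pipeline pipe;
+  *         memcpy_async(staged[threadIdx.x], in[threadIdx.x], pipe);
+  *         pipe.commit_and_wait();   // commit the batch and wait for it
+  *         __syncthreads();
+  *         out[threadIdx.x] = staged[blockDim.x - 1 - threadIdx.x];
+  *     }
+  */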
222
+ _CUDA_PIPELINE_END_NAMESPACE
223
+
224
+ #endif /* !_CUDA_PIPELINE_H_ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h ADDED
@@ -0,0 +1,373 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_HELPERS_H_
51
+ # define _CUDA_PIPELINE_HELPERS_H_
52
+
53
+ # define _CUDA_PIPELINE_NAMESPACE nvcuda::experimental
54
+ # define _CUDA_PIPELINE_BEGIN_NAMESPACE namespace nvcuda { namespace experimental {
55
+ # define _CUDA_PIPELINE_END_NAMESPACE } }
56
+
57
+ # define _CUDA_PIPELINE_INTERNAL_NAMESPACE _CUDA_PIPELINE_NAMESPACE::__pipeline_internal
58
+ # define _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE _CUDA_PIPELINE_BEGIN_NAMESPACE namespace __pipeline_internal {
59
+ # define _CUDA_PIPELINE_END_INTERNAL_NAMESPACE } _CUDA_PIPELINE_END_NAMESPACE
60
+
61
+ # if !defined(_CUDA_PIPELINE_QUALIFIER)
62
+ # define _CUDA_PIPELINE_QUALIFIER inline __device__
63
+ # endif
64
+ # if !defined(_CUDA_PIPELINE_STATIC_QUALIFIER)
65
+ # define _CUDA_PIPELINE_STATIC_QUALIFIER static inline __device__
66
+ # endif
67
+
68
+ # if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
69
+ # define _CUDA_PIPELINE_ARCH_700_OR_LATER
70
+ # endif
71
+
72
+ # if (__CUDA_ARCH__ >= 800)
73
+ # define _CUDA_PIPELINE_HAS_ASYNC_COPY 1
74
+ # else
75
+ # define _CUDA_PIPELINE_HAS_ASYNC_COPY 0
76
+ # endif
77
+
78
+ # if !defined(_CUDA_PIPELINE_MAX_STAGES)
79
+ # define _CUDA_PIPELINE_MAX_STAGES 8
80
+ # endif
81
+
82
+ # if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900)))
83
+ # define _CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER
84
+ # endif
85
+
86
+ # if !defined(_CUDA_PIPELINE_DEBUG)
87
+ # if defined(__CUDACC_DEBUG__)
88
+ # define _CUDA_PIPELINE_DEBUG 1
89
+ # else
90
+ # define _CUDA_PIPELINE_DEBUG 0
91
+ # endif
92
+ # endif
93
+
94
+ # if defined(_CUDA_PIPELINE_DEBUG) && (_CUDA_PIPELINE_DEBUG == 1) && !defined(NDEBUG)
95
+ # if !defined(__CUDACC_RTC__)
96
+ # include <cassert>
97
+ # endif
98
+ # define _CUDA_PIPELINE_ASSERT(x) assert((x));
99
+ # define _CUDA_PIPELINE_ABORT() assert(0);
100
+ # else
101
+ # define _CUDA_PIPELINE_ASSERT(x)
102
+ # define _CUDA_PIPELINE_ABORT() __trap();
103
+ # endif
104
+
105
+ # if defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
106
+ # define _CUDA_PIPELINE_STATIC_ASSERT(c, m) static_assert(c, m)
107
+ # else
108
+ # define _CUDA_PIPELINE_STATIC_ASSERT(c, m)
109
+ # endif
110
+
111
+ # if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__)
112
+ # define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "r"
113
+ # else
114
+ # define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "l"
115
+ # endif
116
+
117
+ # if defined(__CUDACC_RTC__)
118
+ typedef unsigned int uint32_t;
119
+ typedef unsigned long long uint64_t;
120
+ typedef uint64_t uintptr_t;
121
+ # else
122
+ # include <stdint.h>
123
+ # endif
124
+
125
+ _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE
126
+
127
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(short) == 2, "Size mismatch for type 'short'");
128
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(int) == 4, "Size mismatch for type 'int'");
129
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(int2) == 8, "Size mismatch for type 'int2'");
130
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(int4) == 16, "Size mismatch for type 'int4'");
131
+
132
+ extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *);
133
+
134
+ template<size_t CopySize, size_t SourceSize>
135
+ _CUDA_PIPELINE_QUALIFIER
136
+ void pipeline_memcpy_sync(void* __restrict__ dst, const void* __restrict__ src)
137
+ {
138
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
139
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
140
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
141
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
142
+
143
+ char* const d = reinterpret_cast<char*>(dst);
144
+ const char* const s = reinterpret_cast<const char*>(src);
145
+
146
+ size_t copy_step_size;
147
+ if (SourceSize == 0) {
148
+ copy_step_size = CopySize;
149
+ } else if (SourceSize == 2 || SourceSize == 4 || SourceSize == 8 || SourceSize == 16) {
150
+ copy_step_size = SourceSize;
151
+ } else {
152
+ copy_step_size = 1;
153
+ }
154
+
155
+ for (size_t i = 0; i < CopySize; i += copy_step_size) {
156
+ const bool copy_source = SourceSize && (i < SourceSize);
157
+
158
+ switch (copy_step_size) {
159
+ case 1:
160
+ d[i] = copy_source ? s[i] : char();
161
+ break;
162
+ case 2:
163
+ *reinterpret_cast<short*>(d + i) = copy_source ? *reinterpret_cast<const short*>(s + i) : short();
164
+ break;
165
+ case 4:
166
+ *reinterpret_cast<int*>(d + i) = copy_source ? *reinterpret_cast<const int*>(s + i) : int();
167
+ break;
168
+ case 8:
169
+ *reinterpret_cast<int2*>(d + i) = copy_source ? *reinterpret_cast<const int2*>(s + i) : int2();
170
+ break;
171
+ case 16:
172
+ *reinterpret_cast<int4*>(d + i) = copy_source ? *reinterpret_cast<const int4*>(s + i) : int4();
173
+ break;
174
+ }
175
+ }
176
+ }
177
+
178
+ template<bool UseHwAsyncCopy>
179
+ struct ImplementationChooser;
180
+
181
+ template<>
182
+ struct ImplementationChooser<true> {
183
+ template<size_t CopySize, size_t SourceSize>
184
+ struct CpAsyncChooser {
185
+ _CUDA_PIPELINE_STATIC_QUALIFIER
186
+ void cp_async(void* __restrict__ dst, const void* __restrict__ src)
187
+ {
188
+ asm volatile ("cp.async.ca.shared.global [%0], [%1], %2, %3;"
189
+ :
190
+ : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(CopySize),
191
+ "n"(SourceSize)
192
+ : "memory");
193
+ }
194
+ };
195
+
196
+ template<size_t SourceSize>
197
+ struct CpAsyncChooser<16, SourceSize> {
198
+ _CUDA_PIPELINE_STATIC_QUALIFIER
199
+ void cp_async(void* __restrict__ dst, const void* __restrict__ src)
200
+ {
201
+ asm volatile ("cp.async.cg.shared.global [%0], [%1], %2, %3;"
202
+ :
203
+ : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(16), "n"(SourceSize)
204
+ : "memory");
205
+ }
206
+ };
207
+
208
+ template<size_t CopySize, size_t SourceSize>
209
+ _CUDA_PIPELINE_STATIC_QUALIFIER
210
+ void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
211
+ {
212
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
213
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
214
+ _CUDA_PIPELINE_ASSERT(__isShared(dst));
215
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src));
216
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
217
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
218
+
219
+ CpAsyncChooser<CopySize, SourceSize>::cp_async(dst, src);
220
+ }
221
+
222
+ _CUDA_PIPELINE_STATIC_QUALIFIER
223
+ void pipeline_commit()
224
+ {
225
+ asm volatile ("cp.async.commit_group;");
226
+ }
227
+
228
+ template<unsigned N>
229
+ _CUDA_PIPELINE_STATIC_QUALIFIER
230
+ void pipeline_wait_prior()
231
+ {
232
+ asm volatile ("cp.async.wait_group %0;"
233
+ :
234
+ : "n"(N < _CUDA_PIPELINE_MAX_STAGES ? N : _CUDA_PIPELINE_MAX_STAGES));
235
+ }
236
+
237
+ _CUDA_PIPELINE_STATIC_QUALIFIER
238
+ void pipeline_arrive_on(uint64_t* barrier)
239
+ {
240
+ _CUDA_PIPELINE_ASSERT(__isShared(barrier));
241
+
242
+ asm volatile ("cp.async.mbarrier.arrive.shared.b64 [%0];"
243
+ :
244
+ : "r"(__nvvm_get_smem_pointer(barrier)));
245
+ }
246
+ };
247
+
248
+ template<>
249
+ struct ImplementationChooser<false> {
250
+ template<size_t CopySize, size_t SourceSize>
251
+ _CUDA_PIPELINE_STATIC_QUALIFIER
252
+ void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
253
+ {
254
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
255
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
256
+ _CUDA_PIPELINE_ASSERT(__isShared(dst));
257
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src));
258
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
259
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
260
+
261
+ pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
262
+ }
263
+
264
+ _CUDA_PIPELINE_STATIC_QUALIFIER
265
+ void pipeline_commit()
266
+ {
267
+ }
268
+
269
+ template<unsigned N>
270
+ _CUDA_PIPELINE_STATIC_QUALIFIER
271
+ void pipeline_wait_prior()
272
+ {
273
+ }
274
+
275
+ _CUDA_PIPELINE_STATIC_QUALIFIER
276
+ void pipeline_arrive_on(uint64_t* barrier)
277
+ {
278
+ }
279
+ };
280
+
281
+ template<size_t CopySize, size_t SourceSize>
282
+ _CUDA_PIPELINE_QUALIFIER
283
+ void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
284
+ {
285
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
286
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
287
+ _CUDA_PIPELINE_ASSERT(__isShared(dst));
288
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src));
289
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
290
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
291
+
292
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
293
+ }
294
+
295
+ _CUDA_PIPELINE_QUALIFIER
296
+ void pipeline_commit()
297
+ {
298
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_commit();
299
+ }
300
+
301
+ template<unsigned N>
302
+ _CUDA_PIPELINE_QUALIFIER
303
+ void pipeline_wait_prior()
304
+ {
305
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_wait_prior<N>();
306
+ }
307
+
308
+ _CUDA_PIPELINE_QUALIFIER
309
+ void pipeline_arrive_on(uint64_t* barrier)
310
+ {
311
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_arrive_on(barrier);
312
+ }
313
+
314
+ template<size_t CopySize, size_t SourceSize>
315
+ _CUDA_PIPELINE_QUALIFIER
316
+ void pipeline_copy_strict(void* __restrict__ dst, const void* __restrict__ src)
317
+ {
318
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
319
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size.");
320
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
321
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
322
+
323
+ if (__isGlobal(src) && __isShared(dst)) {
324
+ pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
325
+ } else {
326
+ pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
327
+ }
328
+ }
329
+
330
+ template<size_t CopySize, size_t Align>
331
+ _CUDA_PIPELINE_QUALIFIER
332
+ void pipeline_copy_relaxed(void* __restrict__ dst, const void* __restrict__ src)
333
+ {
334
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (Align - 1)));
335
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (Align - 1)));
336
+
337
+ const char* s = reinterpret_cast<const char*>(src);
338
+ char* d = reinterpret_cast<char*>(dst);
339
+ size_t remaining = CopySize;
340
+
341
+ while (remaining) {
342
+ if ((Align >= 16) && (remaining >= 16)) {
343
+ pipeline_copy_strict<16, 16>(d, s);
344
+ d += 16;
345
+ s += 16;
346
+ remaining -= 16;
347
+ } else if ((Align >= 8) && (remaining >= 8)) {
348
+ pipeline_copy_strict<8, 8>(d, s);
349
+ d += 8;
350
+ s += 8;
351
+ remaining -= 8;
352
+ } else if ((Align >= 4) && (remaining >= 4)) {
353
+ pipeline_copy_strict<4, 4>(d, s);
354
+ d += 4;
355
+ s += 4;
356
+ remaining -= 4;
357
+ } else if ((Align >= 2) && (remaining >= 2)) {
358
+ *reinterpret_cast<short*>(d) = *reinterpret_cast<const short*>(s);
359
+ d += 2;
360
+ s += 2;
361
+ remaining -= 2;
362
+ } else {
363
+ *d = *s;
364
+ d += 1;
365
+ s += 1;
366
+ remaining -= 1;
367
+ }
368
+ }
369
+ }
370
+
371
+ _CUDA_PIPELINE_END_INTERNAL_NAMESPACE
372
+
373
+ #endif /* !_CUDA_PIPELINE_HELPERS_H_ */
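The ImplementationChooser above only emits cp.async when __CUDA_ARCH__ >= 800; on older architectures every call reduces to the element-wise pipeline_memcpy_sync loop. A hedged sketch of what the SourceSize parameter means, calling the internal namespace defined in this file directly (normally it is reached through the wrappers in cuda_pipeline_primitives.h):

// Reads 12 bytes from global memory and writes a full 16-byte, 16-byte-aligned
// destination in shared memory; the trailing 4 bytes are zero-filled.
__device__ void stage_partial_chunk(void* dst_shared, const void* src_global)
{
    nvcuda::experimental::__pipeline_internal::pipeline_memcpy_async<16, 12>(dst_shared, src_global);
    nvcuda::experimental::__pipeline_internal::pipeline_commit();
    nvcuda::experimental::__pipeline_internal::pipeline_wait_prior<0>();
}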
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h ADDED
@@ -0,0 +1,148 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_PRIMITIVES_H_
51
+ # define _CUDA_PIPELINE_PRIMITIVES_H_
52
+
53
+ # include "cuda_pipeline_helpers.h"
54
+
55
+ _CUDA_PIPELINE_STATIC_QUALIFIER
56
+ void __pipeline_memcpy_async(void* __restrict__ dst_shared, const void* __restrict__ src_global, size_t size_and_align,
57
+ size_t zfill = 0)
58
+ {
59
+ _CUDA_PIPELINE_ASSERT(size_and_align == 4 || size_and_align == 8 || size_and_align == 16);
60
+ _CUDA_PIPELINE_ASSERT(zfill <= size_and_align);
61
+ _CUDA_PIPELINE_ASSERT(__isShared(dst_shared));
62
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src_global));
63
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst_shared) & (size_and_align - 1)));
64
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src_global) & (size_and_align - 1)));
65
+
66
+ switch (size_and_align) {
67
+ case 16:
68
+ switch (zfill) {
69
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 16>(dst_shared, src_global); return;
70
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 15>(dst_shared, src_global); return;
71
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 14>(dst_shared, src_global); return;
72
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 13>(dst_shared, src_global); return;
73
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 12>(dst_shared, src_global); return;
74
+ case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 11>(dst_shared, src_global); return;
75
+ case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 10>(dst_shared, src_global); return;
76
+ case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 9>(dst_shared, src_global); return;
77
+ case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 8>(dst_shared, src_global); return;
78
+ case 9: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 7>(dst_shared, src_global); return;
79
+ case 10: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 6>(dst_shared, src_global); return;
80
+ case 11: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 5>(dst_shared, src_global); return;
81
+ case 12: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 4>(dst_shared, src_global); return;
82
+ case 13: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 3>(dst_shared, src_global); return;
83
+ case 14: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 2>(dst_shared, src_global); return;
84
+ case 15: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 1>(dst_shared, src_global); return;
85
+ case 16: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 0>(dst_shared, src_global); return;
86
+ default: _CUDA_PIPELINE_ABORT(); return;
87
+ }
88
+ case 8:
89
+ switch (zfill) {
90
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 8>(dst_shared, src_global); return;
91
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 7>(dst_shared, src_global); return;
92
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 6>(dst_shared, src_global); return;
93
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 5>(dst_shared, src_global); return;
94
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 4>(dst_shared, src_global); return;
95
+ case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 3>(dst_shared, src_global); return;
96
+ case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 2>(dst_shared, src_global); return;
97
+ case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 1>(dst_shared, src_global); return;
98
+ case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 0>(dst_shared, src_global); return;
99
+ default: _CUDA_PIPELINE_ABORT(); return;
100
+ }
101
+ case 4:
102
+ switch (zfill) {
103
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 4>(dst_shared, src_global); return;
104
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 3>(dst_shared, src_global); return;
105
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 2>(dst_shared, src_global); return;
106
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 1>(dst_shared, src_global); return;
107
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 0>(dst_shared, src_global); return;
108
+ default: _CUDA_PIPELINE_ABORT(); return;
109
+ }
110
+ default:
111
+ _CUDA_PIPELINE_ABORT();
112
+ return;
113
+ }
114
+ }
115
+
116
+ _CUDA_PIPELINE_STATIC_QUALIFIER
117
+ void __pipeline_commit()
118
+ {
119
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
120
+ }
121
+
122
+ _CUDA_PIPELINE_STATIC_QUALIFIER
123
+ void __pipeline_wait_prior(size_t prior)
124
+ {
125
+ switch (prior) {
126
+ case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); return;
127
+ case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); return;
128
+ case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); return;
129
+ case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); return;
130
+ case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); return;
131
+ case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); return;
132
+ case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); return;
133
+ case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); return;
134
+ default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); return;
135
+ }
136
+ }
137
+
138
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
139
+ # include "cuda_awbarrier_primitives.h"
140
+
141
+ _CUDA_PIPELINE_STATIC_QUALIFIER
142
+ void __pipeline_arrive_on(__mbarrier_t* barrier)
143
+ {
144
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(barrier);
145
+ }
146
+ # endif
147
+
148
+ #endif /* !_CUDA_PIPELINE_PRIMITIVES_H_ */
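A minimal kernel sketch using the primitives defined above (illustrative, not part of the header; assumes a launch with 256 threads per block). The size argument of __pipeline_memcpy_async must be 4, 8 or 16 and doubles as the required alignment of both pointers; zfill defaults to 0.

#include <cuda_pipeline_primitives.h>

__global__ void copy_tile(const float4* __restrict__ in, float4* __restrict__ out)
{
    __shared__ float4 tile[256];
    const unsigned i = blockIdx.x * blockDim.x + threadIdx.x;

    __pipeline_memcpy_async(&tile[threadIdx.x], &in[i], sizeof(float4)); // 16-byte async copy
    __pipeline_commit();        // close the batch of outstanding copies
    __pipeline_wait_prior(0);   // wait until every committed batch has completed
    __syncthreads();

    out[i] = tile[threadIdx.x];
}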
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h ADDED
@@ -0,0 +1,2300 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_RUNTIME_H__)
51
+ #define __CUDA_RUNTIME_H__
52
+
53
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
54
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
55
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__
56
+ #endif
57
+
58
+ #if !defined(__CUDACC_RTC__)
59
+ #if defined(__GNUC__)
60
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
61
+ #pragma GCC diagnostic push
62
+ #endif
63
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)))
64
+ #pragma GCC diagnostic ignored "-Wunused-function"
65
+ #endif
66
+ #elif defined(_MSC_VER)
67
+ #pragma warning(push)
68
+ #pragma warning(disable: 4820)
69
+ #endif
70
+ #endif
71
+
72
+ #ifdef __QNX__
73
+ #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
74
+ typedef unsigned size_t;
75
+ #endif
76
+ #endif
77
+ /*******************************************************************************
78
+ * *
79
+ * *
80
+ * *
81
+ *******************************************************************************/
82
+
83
+ #include "crt/host_config.h"
84
+
85
+ /*******************************************************************************
86
+ * *
87
+ * *
88
+ * *
89
+ *******************************************************************************/
90
+
91
+ #include "builtin_types.h"
92
+ #include "library_types.h"
93
+ #if !defined(__CUDACC_RTC__)
94
+ #define EXCLUDE_FROM_RTC
95
+ #include "channel_descriptor.h"
96
+ #include "cuda_runtime_api.h"
97
+ #include "driver_functions.h"
98
+ #undef EXCLUDE_FROM_RTC
99
+ #endif /* !__CUDACC_RTC__ */
100
+ #include "crt/host_defines.h"
101
+ #ifdef __CUDACC_RTC__
102
+ #include "target"
103
+ #endif /* defined(__CUDACC_RTC__) */
104
+
105
+
106
+ #include "vector_functions.h"
107
+
108
+ #if defined(__CUDACC__)
109
+
110
+ #if defined(__CUDACC_RTC__)
111
+ #include "nvrtc_device_runtime.h"
112
+ #include "crt/device_functions.h"
113
+ #include "crt/common_functions.h"
114
+ #include "device_launch_parameters.h"
115
+
116
+ #else /* !__CUDACC_RTC__ */
117
+ #define EXCLUDE_FROM_RTC
118
+ #include "crt/common_functions.h"
119
+ #include "crt/device_functions.h"
120
+ #include "device_launch_parameters.h"
121
+
122
+ #if defined(__CUDACC_EXTENDED_LAMBDA__)
123
+ #include <functional>
124
+ #include <utility>
125
+ struct __device_builtin__ __nv_lambda_preheader_injection { };
126
+ #endif /* defined(__CUDACC_EXTENDED_LAMBDA__) */
127
+
128
+ #undef EXCLUDE_FROM_RTC
129
+ #endif /* __CUDACC_RTC__ */
130
+
131
+ #endif /* __CUDACC__ */
132
+
133
+ /** \cond impl_private */
134
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
135
+ #define __CUDA_DEPRECATED
136
+ #elif defined(_MSC_VER)
137
+ #define __CUDA_DEPRECATED __declspec(deprecated)
138
+ #elif defined(__GNUC__)
139
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
140
+ #else
141
+ #define __CUDA_DEPRECATED
142
+ #endif
143
+ /** \endcond impl_private */
144
+
145
+ #if defined(__cplusplus) && !defined(__CUDACC_RTC__)
146
+
147
+ #if __cplusplus >= 201103
148
+ #include <utility>
149
+ #endif
150
+
151
+ /*******************************************************************************
152
+ * *
153
+ * *
154
+ * *
155
+ *******************************************************************************/
156
+
157
+ /**
158
+ * \addtogroup CUDART_HIGHLEVEL
159
+ * @{
160
+ */
161
+
162
+ /**
163
+ *\brief Launches a device function
164
+ *
165
+ * The function invokes kernel \p func on \p gridDim (\p gridDim.x &times; \p gridDim.y
166
+ * &times; \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x &times;
167
+ * \p blockDim.y &times; \p blockDim.z) threads.
168
+ *
169
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
171
+ * Each pointer, from <tt>args[0]</tt> to <tt>args[N - 1]</tt>, points to the region
171
+ * of memory from which the actual parameter will be copied.
172
+ *
173
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
174
+ * each thread block.
175
+ *
176
+ * \p stream specifies a stream the invocation is associated with.
177
+ *
178
+ * \param func - Device function symbol
179
+ * \param gridDim - Grid dimentions
180
+ * \param blockDim - Block dimentions
181
+ * \param args - Arguments
182
+ * \param sharedMem - Shared memory (defaults to 0)
183
+ * \param stream - Stream identifier (defaults to NULL)
184
+ *
185
+ * \return
186
+ * ::cudaSuccess,
187
+ * ::cudaErrorInvalidDeviceFunction,
188
+ * ::cudaErrorInvalidConfiguration,
189
+ * ::cudaErrorLaunchFailure,
190
+ * ::cudaErrorLaunchTimeout,
191
+ * ::cudaErrorLaunchOutOfResources,
192
+ * ::cudaErrorSharedObjectInitFailed,
193
+ * ::cudaErrorInvalidPtx,
194
+ * ::cudaErrorUnsupportedPtxVersion,
195
+ * ::cudaErrorNoKernelImageForDevice,
196
+ * ::cudaErrorJitCompilerNotFound,
197
+ * ::cudaErrorJitCompilationDisabled
198
+ * \notefnerr
199
+ * \note_async
200
+ * \note_null_stream
201
+ * \note_init_rt
202
+ * \note_callback
203
+ *
204
+ * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)"
205
+ */
206
+ template<class T>
207
+ static __inline__ __host__ cudaError_t cudaLaunchKernel(
208
+ const T *func,
209
+ dim3 gridDim,
210
+ dim3 blockDim,
211
+ void **args,
212
+ size_t sharedMem = 0,
213
+ cudaStream_t stream = 0
214
+ )
215
+ {
216
+ return ::cudaLaunchKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream);
217
+ }
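A hedged usage sketch of the typed wrapper above (illustrative kernel and helper names): the wrapper only casts the function pointer and forwards to the C API, so kernel arguments are still passed as an array holding the address of each parameter.

__global__ void saxpy(float* y, const float* x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

cudaError_t launch_saxpy(float* d_y, const float* d_x, float a, int n, cudaStream_t stream)
{
    void* args[] = { &d_y, &d_x, &a, &n };       // one address per kernel parameter
    dim3 grid((n + 255) / 256), block(256);
    return cudaLaunchKernel(saxpy, grid, block, args, 0 /* sharedMem */, stream);
}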
218
+
219
+
220
+ #if __cplusplus >= 201103 || defined(__DOXYGEN_ONLY__)
221
+ /**
222
+ * \brief Launches a CUDA function with launch-time configuration
223
+ *
224
+ * Invokes the kernel \p func on \p config->gridDim (\p config->gridDim.x
225
+ * &times; \p config->gridDim.y &times; \p config->gridDim.z) grid of blocks.
226
+ * Each block contains \p config->blockDim (\p config->blockDim.x &times;
227
+ * \p config->blockDim.y &times; \p config->blockDim.z) threads.
228
+ *
229
+ * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that
230
+ * will be available to each thread block.
231
+ *
232
+ * \p config->stream specifies a stream the invocation is associated with.
233
+ *
234
+ * Configuration beyond grid and block dimensions, dynamic shared memory size,
235
+ * and stream can be provided with the following two fields of \p config:
236
+ *
237
+ * \p config->attrs is an array of \p config->numAttrs contiguous
238
+ * ::cudaLaunchAttribute elements. The value of this pointer is not considered
239
+ * if \p config->numAttrs is zero. However, in that case, it is recommended to
240
+ * set the pointer to NULL.
241
+ * \p config->numAttrs is the number of attributes populating the first
242
+ * \p config->numAttrs positions of the \p config->attrs array.
243
+ *
244
+ * The kernel arguments should be passed as arguments to this function via the
245
+ * \p args parameter pack.
246
+ *
247
+ * The C API version of this function, \p cudaLaunchKernelExC, is also available
248
+ * for pre-C++11 compilers and for use cases where the ability to pass kernel
249
+ * parameters via void* array is preferable.
250
+ *
251
+ * \param config - Launch configuration
252
+ * \param func - Kernel to launch
253
+ * \param args - Parameter pack of kernel parameters
254
+ *
255
+ * \return
256
+ * ::cudaSuccess,
257
+ * ::cudaErrorInvalidDeviceFunction,
258
+ * ::cudaErrorInvalidConfiguration,
259
+ * ::cudaErrorLaunchFailure,
260
+ * ::cudaErrorLaunchTimeout,
261
+ * ::cudaErrorLaunchOutOfResources,
262
+ * ::cudaErrorSharedObjectInitFailed,
263
+ * ::cudaErrorInvalidPtx,
264
+ * ::cudaErrorUnsupportedPtxVersion,
265
+ * ::cudaErrorNoKernelImageForDevice,
266
+ * ::cudaErrorJitCompilerNotFound,
267
+ * ::cudaErrorJitCompilationDisabled
268
+ * \note_null_stream
269
+ * \notefnerr
270
+ * \note_init_rt
271
+ * \note_callback
272
+ *
273
+ * \sa
274
+ * \ref ::cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args) "cudaLaunchKernelEx (C API)",
275
+ * ::cuLaunchKernelEx
276
+ */
277
+ template<typename... ExpTypes, typename... ActTypes>
278
+ static __inline__ __host__ cudaError_t cudaLaunchKernelEx(
279
+ const cudaLaunchConfig_t *config,
280
+ void (*kernel)(ExpTypes...),
281
+ ActTypes &&... args
282
+ )
283
+ {
284
+ return [&](ExpTypes... coercedArgs){
285
+ void *pArgs[] = { &coercedArgs... };
286
+ return ::cudaLaunchKernelExC(config, (const void *)kernel, pArgs);
287
+ }(std::forward<ActTypes>(args)...);
288
+ }
289
+ #endif
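A hedged sketch of the variadic overload above (illustrative names; assumes d_data holds 128 * 256 elements). With no extra attributes the configuration carries only dimensions, dynamic shared memory and stream, and the kernel parameters are passed directly instead of through a void* array; like the overload itself, this requires C++11.

__global__ void scale(float* data, float factor)
{
    data[blockIdx.x * blockDim.x + threadIdx.x] *= factor;
}

cudaError_t launch_scale(float* d_data, float factor, cudaStream_t stream)
{
    cudaLaunchConfig_t config = {};
    config.gridDim          = dim3(128);
    config.blockDim         = dim3(256);
    config.dynamicSmemBytes = 0;
    config.stream           = stream;
    config.attrs            = nullptr;   // no extra launch attributes
    config.numAttrs         = 0;
    return cudaLaunchKernelEx(&config, scale, d_data, factor);
}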
290
+
291
+ /**
292
+ *\brief Launches a device function
293
+ *
294
+ * The function invokes kernel \p func on \p gridDim (\p gridDim.x &times; \p gridDim.y
295
+ * &times; \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x &times;
296
+ * \p blockDim.y &times; \p blockDim.z) threads.
297
+ *
298
+ * The device on which this kernel is invoked must have a non-zero value for
299
+ * the device attribute ::cudaDevAttrCooperativeLaunch.
300
+ *
301
+ * The total number of blocks launched cannot exceed the maximum number of blocks per
302
+ * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or
303
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
304
+ * as specified by the device attribute ::cudaDevAttrMultiProcessorCount.
305
+ *
306
+ * The kernel cannot make use of CUDA dynamic parallelism.
307
+ *
308
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
310
+ * Each pointer, from <tt>args[0]</tt> to <tt>args[N - 1]</tt>, points to the region
310
+ * of memory from which the actual parameter will be copied.
311
+ *
312
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
313
+ * each thread block.
314
+ *
315
+ * \p stream specifies a stream the invocation is associated with.
316
+ *
317
+ * \param func - Device function symbol
318
+ * \param gridDim - Grid dimensions
320
+ * \param blockDim - Block dimensions
320
+ * \param args - Arguments
321
+ * \param sharedMem - Shared memory (defaults to 0)
322
+ * \param stream - Stream identifier (defaults to NULL)
323
+ *
324
+ * \return
325
+ * ::cudaSuccess,
326
+ * ::cudaErrorInvalidDeviceFunction,
327
+ * ::cudaErrorInvalidConfiguration,
328
+ * ::cudaErrorLaunchFailure,
329
+ * ::cudaErrorLaunchTimeout,
330
+ * ::cudaErrorLaunchOutOfResources,
331
+ * ::cudaErrorSharedObjectInitFailed
332
+ * \notefnerr
333
+ * \note_async
334
+ * \note_null_stream
335
+ * \note_init_rt
336
+ * \note_callback
337
+ *
338
+ * \ref ::cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C API)"
339
+ */
340
+ template<class T>
341
+ static __inline__ __host__ cudaError_t cudaLaunchCooperativeKernel(
342
+ const T *func,
343
+ dim3 gridDim,
344
+ dim3 blockDim,
345
+ void **args,
346
+ size_t sharedMem = 0,
347
+ cudaStream_t stream = 0
348
+ )
349
+ {
350
+ return ::cudaLaunchCooperativeKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream);
351
+ }
352
+
353
+ /**
354
+ * \brief \hl Creates an event object with the specified flags
355
+ *
356
+ * Creates an event object with the specified flags. Valid flags include:
357
+ * - ::cudaEventDefault: Default event creation flag.
358
+ * - ::cudaEventBlockingSync: Specifies that event should use blocking
359
+ * synchronization. A host thread that uses ::cudaEventSynchronize() to wait
360
+ * on an event created with this flag will block until the event actually
361
+ * completes.
362
+ * - ::cudaEventDisableTiming: Specifies that the created event does not need
363
+ * to record timing data. Events created with this flag specified and
364
+ * the ::cudaEventBlockingSync flag not specified will provide the best
365
+ * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery().
366
+ *
367
+ * \param event - Newly created event
368
+ * \param flags - Flags for new event
369
+ *
370
+ * \return
371
+ * ::cudaSuccess,
372
+ * ::cudaErrorInvalidValue,
373
+ * ::cudaErrorLaunchFailure,
374
+ * ::cudaErrorMemoryAllocation
375
+ * \notefnerr
376
+ * \note_init_rt
377
+ * \note_callback
378
+ *
379
+ * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)",
380
+ * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery,
381
+ * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime,
382
+ * ::cudaStreamWaitEvent
383
+ */
384
+ static __inline__ __host__ cudaError_t cudaEventCreate(
385
+ cudaEvent_t *event,
386
+ unsigned int flags
387
+ )
388
+ {
389
+ return ::cudaEventCreateWithFlags(event, flags);
390
+ }
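A hedged timing sketch built on the overload above (illustrative helper): ::cudaEventDefault keeps timing enabled, which ::cudaEventElapsedTime requires.

cudaError_t time_section(cudaStream_t stream, float* elapsed_ms)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start, cudaEventDefault);   // forwards to cudaEventCreateWithFlags
    cudaEventCreate(&stop,  cudaEventDefault);

    cudaEventRecord(start, stream);
    // ... enqueue the work to be timed on `stream` here ...
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);

    cudaError_t status = cudaEventElapsedTime(elapsed_ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return status;
}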
391
+
392
+ /**
393
+ * \brief Creates an executable graph from a graph
394
+ *
395
+ * Instantiates \p graph as an executable graph. The graph is validated for any
396
+ * structural constraints or intra-node constraints which were not previously
397
+ * validated. If instantiation is successful, a handle to the instantiated graph
398
+ * is returned in \p pGraphExec.
399
+ *
400
+ * If there are any errors, diagnostic information may be returned in \p pErrorNode and
401
+ * \p pLogBuffer. This is the primary way to inspect instantiation errors. The output
402
+ * will be null terminated unless the diagnostics overflow
403
+ * the buffer. In this case, they will be truncated, and the last byte can be
404
+ * inspected to determine if truncation occurred.
405
+ *
406
+ * \param pGraphExec - Returns instantiated graph
407
+ * \param graph - Graph to instantiate
408
+ * \param pErrorNode - In case of an instantiation error, this may be modified to
409
+ * indicate a node contributing to the error
410
+ * \param pLogBuffer - A character buffer to store diagnostic messages
411
+ * \param bufferSize - Size of the log buffer in bytes
412
+ *
413
+ * \return
414
+ * ::cudaSuccess,
415
+ * ::cudaErrorInvalidValue
416
+ * \note_graph_thread_safety
417
+ * \notefnerr
418
+ * \note_init_rt
419
+ * \note_callback
420
+ *
421
+ * \sa
422
+ * ::cudaGraphInstantiateWithFlags,
423
+ * ::cudaGraphCreate,
424
+ * ::cudaGraphUpload,
425
+ * ::cudaGraphLaunch,
426
+ * ::cudaGraphExecDestroy
427
+ */
428
+ static __inline__ __host__ cudaError_t cudaGraphInstantiate(
429
+ cudaGraphExec_t *pGraphExec,
430
+ cudaGraph_t graph,
431
+ cudaGraphNode_t *pErrorNode,
432
+ char *pLogBuffer,
433
+ size_t bufferSize
434
+ )
435
+ {
436
+ (void)pErrorNode;
437
+ (void)pLogBuffer;
438
+ (void)bufferSize;
439
+ return ::cudaGraphInstantiate(pGraphExec, graph, 0);
440
+ }
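A hedged capture/instantiate/launch round trip using the wrapper above (illustrative helper; error handling trimmed). This overload ignores the legacy error-node and log-buffer parameters, so nullptr/0 placeholders are fine.

cudaError_t replay_captured_work(cudaStream_t stream)
{
    cudaGraph_t     graph;
    cudaGraphExec_t exec;

    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    // ... enqueue the kernels and memcpys to be replayed into `stream` here ...
    cudaStreamEndCapture(stream, &graph);

    cudaError_t status = cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0);
    if (status == cudaSuccess) {
        status = cudaGraphLaunch(exec, stream);
        cudaGraphExecDestroy(exec);
    }
    cudaGraphDestroy(graph);
    return status;
}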
441
+
442
+ /**
443
+ * \brief \hl Allocates page-locked memory on the host
444
+ *
445
+ * Allocates \p size bytes of host memory that is page-locked and accessible
446
+ * to the device. The driver tracks the virtual memory ranges allocated with
447
+ * this function and automatically accelerates calls to functions such as
448
+ * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it
449
+ * can be read or written with much higher bandwidth than pageable memory
450
+ * obtained with functions such as ::malloc(). Allocating excessive amounts of
451
+ * pinned memory may degrade system performance, since it reduces the amount
452
+ * of memory available to the system for paging. As a result, this function is
453
+ * best used sparingly to allocate staging areas for data exchange between host
454
+ * and device.
455
+ *
456
+ * The \p flags parameter enables different options to be specified that affect
457
+ * the allocation, as follows.
458
+ * - ::cudaHostAllocDefault: This flag's value is defined to be 0.
459
+ * - ::cudaHostAllocPortable: The memory returned by this call will be
460
+ * considered as pinned memory by all CUDA contexts, not just the one that
461
+ * performed the allocation.
462
+ * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space.
463
+ * The device pointer to the memory may be obtained by calling
464
+ * ::cudaHostGetDevicePointer().
465
+ * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC).
466
+ * WC memory can be transferred across the PCI Express bus more quickly on some
467
+ * system configurations, but cannot be read efficiently by most CPUs. WC
468
+ * memory is a good option for buffers that will be written by the CPU and read
469
+ * by the device via mapped pinned memory or host->device transfers.
470
+ *
471
+ * All of these flags are orthogonal to one another: a developer may allocate
472
+ * memory that is portable, mapped and/or write-combined with no restrictions.
473
+ *
474
+ * ::cudaSetDeviceFlags() must have been called with the ::cudaDeviceMapHost
475
+ * flag in order for the ::cudaHostAllocMapped flag to have any effect.
476
+ *
477
+ * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices
478
+ * that do not support mapped pinned memory. The failure is deferred to
479
+ * ::cudaHostGetDevicePointer() because the memory may be mapped into other
480
+ * CUDA contexts via the ::cudaHostAllocPortable flag.
481
+ *
482
+ * Memory allocated by this function must be freed with ::cudaFreeHost().
483
+ *
484
+ * \param ptr - Pointer to allocated host memory
485
+ * \param size - Requested allocation size in bytes
486
+ * \param flags - Requested properties of allocated memory
487
+ *
488
+ * \return
489
+ * ::cudaSuccess,
490
+ * ::cudaErrorMemoryAllocation
491
+ * \notefnerr
492
+ * \note_init_rt
493
+ * \note_callback
494
+ *
495
+ * \sa ::cudaSetDeviceFlags,
496
+ * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)",
497
+ * ::cudaFreeHost, ::cudaHostAlloc
498
+ */
499
+ static __inline__ __host__ cudaError_t cudaMallocHost(
500
+ void **ptr,
501
+ size_t size,
502
+ unsigned int flags
503
+ )
504
+ {
505
+ return ::cudaHostAlloc(ptr, size, flags);
506
+ }
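A hedged sketch of the flag combinations described above (illustrative helper): a portable, mapped pinned staging buffer, to be released later with ::cudaFreeHost; with the mapped flag, the matching device pointer can then be retrieved via ::cudaHostGetDevicePointer.

cudaError_t make_staging_buffer(float** h_staging, size_t bytes)
{
    // Pinned, visible to all CUDA contexts, and mapped into the device address space.
    return cudaMallocHost((void**)h_staging, bytes,
                          cudaHostAllocPortable | cudaHostAllocMapped);
}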
507
+
508
+ template<class T>
509
+ static __inline__ __host__ cudaError_t cudaHostAlloc(
510
+ T **ptr,
511
+ size_t size,
512
+ unsigned int flags
513
+ )
514
+ {
515
+ return ::cudaHostAlloc((void**)(void*)ptr, size, flags);
516
+ }
517
+
518
+ template<class T>
519
+ static __inline__ __host__ cudaError_t cudaHostGetDevicePointer(
520
+ T **pDevice,
521
+ void *pHost,
522
+ unsigned int flags
523
+ )
524
+ {
525
+ return ::cudaHostGetDevicePointer((void**)(void*)pDevice, pHost, flags);
526
+ }
527
+
528
+ /**
529
+ * \brief Allocates memory that will be automatically managed by the Unified Memory system
530
+ *
531
+ * Allocates \p size bytes of managed memory on the device and returns in
532
+ * \p *devPtr a pointer to the allocated memory. If the device doesn't support
533
+ * allocating managed memory, ::cudaErrorNotSupported is returned. Support
534
+ * for managed memory can be queried using the device attribute
535
+ * ::cudaDevAttrManagedMemory. The allocated memory is suitably
536
+ * aligned for any kind of variable. The memory is not cleared. If \p size
537
+ * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. The pointer
538
+ * is valid on the CPU and on all GPUs in the system that support managed memory.
539
+ * All accesses to this pointer must obey the Unified Memory programming model.
540
+ *
541
+ * \p flags specifies the default stream association for this allocation.
542
+ * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The
543
+ * default value for \p flags is ::cudaMemAttachGlobal.
544
+ * If ::cudaMemAttachGlobal is specified, then this memory is accessible from
545
+ * any stream on any device. If ::cudaMemAttachHost is specified, then the
546
+ * allocation should not be accessed from devices that have a zero value for the
547
+ * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to
548
+ * ::cudaStreamAttachMemAsync will be required to enable access on such devices.
549
+ *
550
+ * If the association is later changed via ::cudaStreamAttachMemAsync to
551
+ * a single stream, the default association, as specified during ::cudaMallocManaged,
552
+ * is restored when that stream is destroyed. For __managed__ variables, the
553
+ * default association is always ::cudaMemAttachGlobal. Note that destroying a
554
+ * stream is an asynchronous operation, and as a result, the change to default
555
+ * association won't happen until all work in the stream has completed.
556
+ *
557
+ * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree.
558
+ *
559
+ * Device memory oversubscription is possible for GPUs that have a non-zero value for the
560
+ * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on
561
+ * such GPUs may be evicted from device memory to host memory at any time by the Unified
562
+ * Memory driver in order to make room for other allocations.
563
+ *
564
+ * In a multi-GPU system where all GPUs have a non-zero value for the device attribute
565
+ * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this
566
+ * API returns and instead may be populated on access. In such systems, managed memory can
567
+ * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to
568
+ * maintain data locality and prevent excessive page faults to the extent possible. The application
569
+ * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application
570
+ * can also explicitly migrate memory to a desired processor's memory via
571
+ * ::cudaMemPrefetchAsync.
572
+ *
573
+ * In a multi-GPU system where all of the GPUs have a zero value for the device attribute
574
+ * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support
575
+ * with each other, the physical storage for managed memory is created on the GPU which is active
576
+ * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced
577
+ * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate
578
+ * memory among such GPUs.
579
+ *
580
+ * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and
581
+ * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess
582
+ * is zero for at least one of those GPUs, the location chosen for physical storage of managed
583
+ * memory is system-dependent.
584
+ * - On Linux, the location chosen will be device memory as long as the current set of active
585
+ * contexts are on devices that either have peer-to-peer support with each other or have a
586
+ * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
587
+ * If there is an active context on a GPU that does not have a non-zero value for that device
588
+ * attribute and it does not have peer-to-peer support with the other devices that have active
589
+ * contexts on them, then the location for physical storage will be 'zero-copy' or host memory.
590
+ * Note that this means that managed memory that is located in device memory is migrated to
591
+ * host memory if a new context is created on a GPU that doesn't have a non-zero value for
592
+ * the device attribute and does not support peer-to-peer with at least one of the other devices
593
+ * that has an active context. This in turn implies that context creation may fail if there is
594
+ * insufficient host memory to migrate all managed allocations.
595
+ * - On Windows, the physical storage is always created in 'zero-copy' or host memory.
596
+ * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these
597
+ * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to
598
+ * restrict CUDA to only use those GPUs that have peer-to-peer support.
599
+ * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero
600
+ * value to force the driver to always use device memory for physical storage.
601
+ * When this environment variable is set to a non-zero value, all devices used in
602
+ * that process that support managed memory have to be peer-to-peer compatible
603
+ * with each other. The error ::cudaErrorInvalidDevice will be returned if a device
604
+ * that supports managed memory is used and it is not peer-to-peer compatible with
605
+ * any of the other managed memory supporting devices that were previously used in
606
+ * that process, even if ::cudaDeviceReset has been called on those devices. These
607
+ * environment variables are described in the CUDA programming guide under the
608
+ * "CUDA environment variables" section.
609
+ * - On ARM, managed memory is not available on discrete GPUs with Drive PX-2.
610
+ *
611
+ * \param devPtr - Pointer to allocated device memory
612
+ * \param size - Requested allocation size in bytes
613
+ * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal)
614
+ *
615
+ * \return
616
+ * ::cudaSuccess,
617
+ * ::cudaErrorMemoryAllocation,
618
+ * ::cudaErrorNotSupported,
619
+ * ::cudaErrorInvalidValue
620
+ * \note_init_rt
621
+ * \note_callback
622
+ *
623
+ * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray,
624
+ * ::cudaMalloc3D, ::cudaMalloc3DArray,
625
+ * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)",
626
+ * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync
627
+ */
628
+ template<class T>
629
+ static __inline__ __host__ cudaError_t cudaMallocManaged(
630
+ T **devPtr,
631
+ size_t size,
632
+ unsigned int flags = cudaMemAttachGlobal
633
+ )
634
+ {
635
+ return ::cudaMallocManaged((void**)(void*)devPtr, size, flags);
636
+ }
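A hedged end-to-end sketch of the managed-memory flow described above (illustrative helper; device is the ordinal the kernels will run on). The prefetch is the optional locality hint mentioned in the discussion of ::cudaMemPrefetchAsync.

cudaError_t run_managed(int n, int device, cudaStream_t stream)
{
    float* data = nullptr;
    cudaError_t status = cudaMallocManaged(&data, n * sizeof(float)); // cudaMemAttachGlobal by default
    if (status != cudaSuccess) return status;

    for (int i = 0; i < n; ++i) data[i] = 1.0f;                       // CPU writes, no explicit copy
    cudaMemPrefetchAsync(data, n * sizeof(float), device, stream);    // optional locality hint
    // ... launch kernels on `stream` that read and write `data` ...
    cudaStreamSynchronize(stream);
    return cudaFree(data);
}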
637
+
638
+ /**
639
+ * \brief Attach memory to a stream asynchronously
640
+ *
641
+ * Enqueues an operation in \p stream to specify stream association of
642
+ * \p length bytes of memory starting from \p devPtr. This function is a
643
+ * stream-ordered operation, meaning that it is dependent on, and will
644
+ * only take effect when, previous work in stream has completed. Any
645
+ * previous association is automatically replaced.
646
+ *
647
+ * \p devPtr must point to one of the following types of memory:
648
+ * - managed memory declared using the __managed__ keyword or allocated with
649
+ * ::cudaMallocManaged.
650
+ * - a valid host-accessible region of system-allocated pageable memory. This
651
+ * type of memory may only be specified if the device associated with the
652
+ * stream reports a non-zero value for the device attribute
653
+ * ::cudaDevAttrPageableMemoryAccess.
654
+ *
655
+ * For managed allocations, \p length must be either zero or the entire
656
+ * allocation's size. Both indicate that the entire allocation's stream
657
+ * association is being changed. Currently, it is not possible to change stream
658
+ * association for a portion of a managed allocation.
659
+ *
660
+ * For pageable allocations, \p length must be non-zero.
661
+ *
662
+ * The stream association is specified using \p flags which must be
663
+ * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle.
664
+ * The default value for \p flags is ::cudaMemAttachSingle.
665
+ * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed
666
+ * by any stream on any device.
667
+ * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee
668
+ * that it won't access the memory on the device from any stream on a device that
669
+ * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
670
+ * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with
671
+ * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess,
672
+ * the program makes a guarantee that it will only access the memory on the device
673
+ * from \p stream. It is illegal to attach singly to the NULL stream, because the
674
+ * NULL stream is a virtual global stream and not a specific stream. An error will
675
+ * be returned in this case.
676
+ *
677
+ * When memory is associated with a single stream, the Unified Memory system will
678
+ * allow CPU access to this memory region so long as all operations in \p stream
679
+ * have completed, regardless of whether other streams are active. In effect,
680
+ * this constrains exclusive ownership of the managed memory region by
681
+ * an active GPU to per-stream activity instead of whole-GPU activity.
682
+ *
683
+ * Accessing memory on the device from streams that are not associated with
684
+ * it will produce undefined results. No error checking is performed by the
685
+ * Unified Memory system to ensure that kernels launched into other streams
686
+ * do not access this region.
687
+ *
688
+ * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync
689
+ * via events, synchronization or other means to ensure legal access to memory
690
+ * at all times. Data visibility and coherency will be changed appropriately
691
+ * for all kernels which follow a stream-association change.
692
+ *
693
+ * If \p stream is destroyed while data is associated with it, the association is
694
+ * removed and the association reverts to the default visibility of the allocation
695
+ * as specified at ::cudaMallocManaged. For __managed__ variables, the default
696
+ * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an
697
+ * asynchronous operation, and as a result, the change to default association won't
698
+ * happen until all work in the stream has completed.
699
+ *
700
+ * \param stream - Stream in which to enqueue the attach operation
701
+ * \param devPtr - Pointer to memory (must be a pointer to managed memory or
702
+ * to a valid host-accessible region of system-allocated
703
+ * memory)
704
+ * \param length - Length of memory (defaults to zero)
705
+ * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle)
706
+ *
707
+ * \return
708
+ * ::cudaSuccess,
709
+ * ::cudaErrorNotReady,
710
+ * ::cudaErrorInvalidValue,
711
+ * ::cudaErrorInvalidResourceHandle
712
+ * \notefnerr
713
+ * \note_init_rt
714
+ * \note_callback
715
+ *
716
+ * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged
717
+ */
718
+ template<class T>
719
+ static __inline__ __host__ cudaError_t cudaStreamAttachMemAsync(
720
+ cudaStream_t stream,
721
+ T *devPtr,
722
+ size_t length = 0,
723
+ unsigned int flags = cudaMemAttachSingle
724
+ )
725
+ {
726
+ return ::cudaStreamAttachMemAsync(stream, (void*)devPtr, length, flags);
727
+ }
728
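+
+ /* Usage sketch (editorial example, not part of the original header): attaching an
+  * entire managed allocation to one stream so the host can use it while other
+  * streams keep the GPU busy. Names are hypothetical; errors are not checked.
+  */
+ static __inline__ __host__ void exampleAttachToStream(cudaStream_t stream, float *managedBuf)
+ {
+     /* length = 0 attaches the whole allocation; flags defaults to cudaMemAttachSingle */
+     cudaStreamAttachMemAsync(stream, managedBuf);
+     /* the attach is stream-ordered, so wait for it (and prior work) to complete */
+     cudaStreamSynchronize(stream);
+     managedBuf[0] = 0.0f;         /* CPU access is now safe with respect to this stream */
+ }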
+
729
+ template<class T>
730
+ static __inline__ __host__ cudaError_t cudaMalloc(
731
+ T **devPtr,
732
+ size_t size
733
+ )
734
+ {
735
+ return ::cudaMalloc((void**)(void*)devPtr, size);
736
+ }
737
+
738
+ template<class T>
739
+ static __inline__ __host__ cudaError_t cudaMallocHost(
740
+ T **ptr,
741
+ size_t size,
742
+ unsigned int flags = 0
743
+ )
744
+ {
745
+ return cudaMallocHost((void**)(void*)ptr, size, flags);
746
+ }
747
+
748
+ template<class T>
749
+ static __inline__ __host__ cudaError_t cudaMallocPitch(
750
+ T **devPtr,
751
+ size_t *pitch,
752
+ size_t width,
753
+ size_t height
754
+ )
755
+ {
756
+ return ::cudaMallocPitch((void**)(void*)devPtr, pitch, width, height);
757
+ }
758
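+
+ /* Usage sketch (editorial example, not part of the original header): allocating a
+  * pitched 2D buffer with the typed ::cudaMallocPitch overload above and clearing it
+  * through the returned pitch. The dimensions are arbitrary placeholders.
+  */
+ static __inline__ __host__ cudaError_t examplePitchedAlloc(void)
+ {
+     float *devPtr = 0;
+     size_t pitch = 0;
+     const size_t width = 256, height = 64;
+     cudaError_t err = cudaMallocPitch(&devPtr, &pitch, width * sizeof(float), height);
+     if (err != cudaSuccess) {
+         return err;
+     }
+     /* row r begins at (char*)devPtr + r * pitch; pitch is in bytes and may be padded */
+     err = cudaMemset2D(devPtr, pitch, 0, width * sizeof(float), height);
+     cudaFree(devPtr);
+     return err;
+ }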
+
759
+ /**
760
+ * \brief Allocate from a pool
761
+ *
762
+ * This is an alternate spelling for cudaMallocFromPoolAsync
763
+ * made available through function overloading.
764
+ *
765
+ * \sa ::cudaMallocFromPoolAsync,
766
+ * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaStream_t hStream) "cudaMallocAsync (C API)"
767
+ */
768
+ static __inline__ __host__ cudaError_t cudaMallocAsync(
769
+ void **ptr,
770
+ size_t size,
771
+ cudaMemPool_t memPool,
772
+ cudaStream_t stream
773
+ )
774
+ {
775
+ return ::cudaMallocFromPoolAsync(ptr, size, memPool, stream);
776
+ }
777
+
778
+ template<class T>
779
+ static __inline__ __host__ cudaError_t cudaMallocAsync(
780
+ T **ptr,
781
+ size_t size,
782
+ cudaMemPool_t memPool,
783
+ cudaStream_t stream
784
+ )
785
+ {
786
+ return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream);
787
+ }
788
+
789
+ template<class T>
790
+ static __inline__ __host__ cudaError_t cudaMallocAsync(
791
+ T **ptr,
792
+ size_t size,
793
+ cudaStream_t stream
794
+ )
795
+ {
796
+ return ::cudaMallocAsync((void**)(void*)ptr, size, stream);
797
+ }
798
+
799
+ template<class T>
800
+ static __inline__ __host__ cudaError_t cudaMallocFromPoolAsync(
801
+ T **ptr,
802
+ size_t size,
803
+ cudaMemPool_t memPool,
804
+ cudaStream_t stream
805
+ )
806
+ {
807
+ return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream);
808
+ }
809
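+
+ /* Usage sketch (editorial example, not part of the original header): stream-ordered
+  * allocation with the typed ::cudaMallocAsync overload above, paired with a
+  * stream-ordered free. The size is a placeholder and errors are not checked.
+  */
+ static __inline__ __host__ void exampleStreamOrderedAlloc(cudaStream_t stream)
+ {
+     int *devInts = 0;
+     cudaMallocAsync(&devInts, 4096 * sizeof(int), stream);    /* allocation ordered in the stream */
+     cudaMemsetAsync(devInts, 0, 4096 * sizeof(int), stream);  /* use it in the same stream */
+     cudaFreeAsync(devInts, stream);                           /* the free is stream-ordered too */
+ }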
+
810
+ #if defined(__CUDACC__)
811
+
812
+ /**
813
+ * \brief \hl Copies data to the given symbol on the device
814
+ *
815
+ * Copies \p count bytes from the memory area pointed to by \p src
816
+ * to the memory area \p offset bytes from the start of symbol
817
+ * \p symbol. The memory areas may not overlap. \p symbol is a variable that
818
+ * resides in global or constant memory space. \p kind can be either
819
+ * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice.
820
+ *
821
+ * \param symbol - Device symbol reference
822
+ * \param src - Source memory address
823
+ * \param count - Size in bytes to copy
824
+ * \param offset - Offset from start of symbol in bytes
825
+ * \param kind - Type of transfer
826
+ *
827
+ * \return
828
+ * ::cudaSuccess,
829
+ * ::cudaErrorInvalidValue,
830
+ * ::cudaErrorInvalidSymbol,
831
+ * ::cudaErrorInvalidMemcpyDirection,
832
+ * ::cudaErrorNoKernelImageForDevice
833
+ * \notefnerr
834
+ * \note_sync
835
+ * \note_string_api_deprecation
836
+ * \note_init_rt
837
+ * \note_callback
838
+ *
839
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
840
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
841
+ * ::cudaMemcpy2DArrayToArray,
842
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
843
+ * ::cudaMemcpy2DToArrayAsync,
844
+ * ::cudaMemcpy2DFromArrayAsync,
845
+ * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync
846
+ */
847
+ template<class T>
848
+ static __inline__ __host__ cudaError_t cudaMemcpyToSymbol(
849
+ const T &symbol,
850
+ const void *src,
851
+ size_t count,
852
+ size_t offset = 0,
853
+ enum cudaMemcpyKind kind = cudaMemcpyHostToDevice
854
+ )
855
+ {
856
+ return ::cudaMemcpyToSymbol((const void*)&symbol, src, count, offset, kind);
857
+ }
858
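+
+ /* Usage sketch (editorial example, not part of the original header): uploading host
+  * data into a __constant__ symbol with the C++ overload above, which takes the
+  * symbol by reference instead of by name. exampleCoeffs is a hypothetical symbol.
+  */
+ static __constant__ float exampleCoeffs[16];
+
+ static __inline__ __host__ cudaError_t exampleUploadCoeffs(const float *hostCoeffs)
+ {
+     /* offset defaults to 0 and kind to cudaMemcpyHostToDevice */
+     return cudaMemcpyToSymbol(exampleCoeffs, hostCoeffs, 16 * sizeof(float));
+ }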
+
859
+ /**
860
+ * \brief \hl Copies data to the given symbol on the device
861
+ *
862
+ * Copies \p count bytes from the memory area pointed to by \p src
863
+ * to the memory area \p offset bytes from the start of symbol
864
+ * \p symbol. The memory areas may not overlap. \p symbol is a variable that
865
+ * resides in global or constant memory space. \p kind can be either
866
+ * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice.
867
+ *
868
+ * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so
869
+ * the call may return before the copy is complete. The copy can optionally
870
+ * be associated to a stream by passing a non-zero \p stream argument. If
871
+ * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy
872
+ * may overlap with operations in other streams.
873
+ *
874
+ * \param symbol - Device symbol reference
875
+ * \param src - Source memory address
876
+ * \param count - Size in bytes to copy
877
+ * \param offset - Offset from start of symbol in bytes
878
+ * \param kind - Type of transfer
879
+ * \param stream - Stream identifier
880
+ *
881
+ * \return
882
+ * ::cudaSuccess,
883
+ * ::cudaErrorInvalidValue,
884
+ * ::cudaErrorInvalidSymbol,
885
+ * ::cudaErrorInvalidMemcpyDirection,
886
+ * ::cudaErrorNoKernelImageForDevice
887
+ * \notefnerr
888
+ * \note_async
889
+ * \note_string_api_deprecation
890
+ * \note_init_rt
891
+ * \note_callback
892
+ *
893
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
894
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
895
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
896
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
897
+ * ::cudaMemcpy2DToArrayAsync,
898
+ * ::cudaMemcpy2DFromArrayAsync,
899
+ * ::cudaMemcpyFromSymbolAsync
900
+ */
901
+ template<class T>
902
+ static __inline__ __host__ cudaError_t cudaMemcpyToSymbolAsync(
903
+ const T &symbol,
904
+ const void *src,
905
+ size_t count,
906
+ size_t offset = 0,
907
+ enum cudaMemcpyKind kind = cudaMemcpyHostToDevice,
908
+ cudaStream_t stream = 0
909
+ )
910
+ {
911
+ return ::cudaMemcpyToSymbolAsync((const void*)&symbol, src, count, offset, kind, stream);
912
+ }
913
+
914
+ /**
915
+ * \brief \hl Copies data from the given symbol on the device
916
+ *
917
+ * Copies \p count bytes from the memory area \p offset bytes
918
+ * from the start of symbol \p symbol to the memory area pointed to by \p dst.
919
+ * The memory areas may not overlap. \p symbol is a variable that
920
+ * resides in global or constant memory space. \p kind can be either
921
+ * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice.
922
+ *
923
+ * \param dst - Destination memory address
924
+ * \param symbol - Device symbol reference
925
+ * \param count - Size in bytes to copy
926
+ * \param offset - Offset from start of symbol in bytes
927
+ * \param kind - Type of transfer
928
+ *
929
+ * \return
930
+ * ::cudaSuccess,
931
+ * ::cudaErrorInvalidValue,
932
+ * ::cudaErrorInvalidSymbol,
933
+ * ::cudaErrorInvalidMemcpyDirection,
934
+ * ::cudaErrorNoKernelImageForDevice
935
+ * \notefnerr
936
+ * \note_sync
937
+ * \note_string_api_deprecation
938
+ * \note_init_rt
939
+ * \note_callback
940
+ *
941
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
942
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
943
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
944
+ * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
945
+ * ::cudaMemcpy2DToArrayAsync,
946
+ * ::cudaMemcpy2DFromArrayAsync,
947
+ * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync
948
+ */
949
+ template<class T>
950
+ static __inline__ __host__ cudaError_t cudaMemcpyFromSymbol(
951
+ void *dst,
952
+ const T &symbol,
953
+ size_t count,
954
+ size_t offset = 0,
955
+ enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost
956
+ )
957
+ {
958
+ return ::cudaMemcpyFromSymbol(dst, (const void*)&symbol, count, offset, kind);
959
+ }
960
+
961
+ /**
962
+ * \brief \hl Copies data from the given symbol on the device
963
+ *
964
+ * Copies \p count bytes from the memory area \p offset bytes
965
+ * from the start of symbol \p symbol to the memory area pointed to by \p dst.
966
+ * The memory areas may not overlap. \p symbol is a variable that resides in
967
+ * global or constant memory space. \p kind can be either
968
+ * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice.
969
+ *
970
+ * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so
971
+ * the call may return before the copy is complete. The copy can optionally be
972
+ * associated to a stream by passing a non-zero \p stream argument. If \p kind
973
+ * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap
974
+ * with operations in other streams.
975
+ *
976
+ * \param dst - Destination memory address
977
+ * \param symbol - Device symbol reference
978
+ * \param count - Size in bytes to copy
979
+ * \param offset - Offset from start of symbol in bytes
980
+ * \param kind - Type of transfer
981
+ * \param stream - Stream identifier
982
+ *
983
+ * \return
984
+ * ::cudaSuccess,
985
+ * ::cudaErrorInvalidValue,
986
+ * ::cudaErrorInvalidSymbol,
987
+ * ::cudaErrorInvalidMemcpyDirection,
988
+ * ::cudaErrorNoKernelImageForDevice
989
+ * \notefnerr
990
+ * \note_async
991
+ * \note_string_api_deprecation
992
+ * \note_init_rt
993
+ * \note_callback
994
+ *
995
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
996
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
997
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
998
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
999
+ * ::cudaMemcpy2DToArrayAsync,
1000
+ * ::cudaMemcpy2DFromArrayAsync,
1001
+ * ::cudaMemcpyToSymbolAsync
1002
+ */
1003
+ template<class T>
1004
+ static __inline__ __host__ cudaError_t cudaMemcpyFromSymbolAsync(
1005
+ void *dst,
1006
+ const T &symbol,
1007
+ size_t count,
1008
+ size_t offset = 0,
1009
+ enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost,
1010
+ cudaStream_t stream = 0
1011
+ )
1012
+ {
1013
+ return ::cudaMemcpyFromSymbolAsync(dst, (const void*)&symbol, count, offset, kind, stream);
1014
+ }
1015
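+
+ /* Usage sketch (editorial example, not part of the original header): reading a
+  * __device__ symbol back into host memory asynchronously on a stream and waiting
+  * before the host consumes the value. exampleCounter is a hypothetical symbol.
+  */
+ static __device__ int exampleCounter;
+
+ static __inline__ __host__ cudaError_t exampleReadCounter(int *hostValue, cudaStream_t stream)
+ {
+     /* for the copy to overlap with other work, hostValue should point to pinned memory */
+     cudaError_t err = cudaMemcpyFromSymbolAsync(hostValue, exampleCounter, sizeof(int), 0,
+                                                 cudaMemcpyDeviceToHost, stream);
+     if (err != cudaSuccess) {
+         return err;
+     }
+     return cudaStreamSynchronize(stream);     /* wait before reading *hostValue */
+ }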
+
1016
+ /**
1017
+ * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph
1018
+ *
1019
+ * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with
1020
+ * \p numDependencies dependencies specified via \p pDependencies.
1021
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed
1022
+ * at the root of the graph. \p pDependencies may not have any duplicate entries.
1023
+ * A handle to the new node will be returned in \p pGraphNode.
1024
+ *
1025
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1026
+ * pointed to by \p src to the memory area \p offset bytes from the start
1027
+ * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that
1028
+ * resides in global or constant memory space. \p kind can be either
1029
+ * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1030
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of
1031
+ * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault
1032
+ * is only allowed on systems that support unified virtual addressing.
1033
+ *
1034
+ * Memcpy nodes have some additional restrictions with regards to managed memory, if the
1035
+ * system contains at least one device which has a zero value for the device attribute
1036
+ * ::cudaDevAttrConcurrentManagedAccess.
1037
+ *
1038
+ * \param pGraphNode - Returns newly created node
1039
+ * \param graph - Graph to which to add the node
1040
+ * \param pDependencies - Dependencies of the node
1041
+ * \param numDependencies - Number of dependencies
1042
+ * \param symbol - Device symbol address
1043
+ * \param src - Source memory address
1044
+ * \param count - Size in bytes to copy
1045
+ * \param offset - Offset from start of symbol in bytes
1046
+ * \param kind - Type of transfer
1047
+ *
1048
+ * \return
1049
+ * ::cudaSuccess,
1050
+ * ::cudaErrorInvalidValue
1051
+ * \note_graph_thread_safety
1052
+ * \notefnerr
1053
+ * \note_init_rt
1054
+ * \note_callback
1055
+ *
1056
+ * \sa
1057
+ * ::cudaMemcpyToSymbol,
1058
+ * ::cudaGraphAddMemcpyNode,
1059
+ * ::cudaGraphAddMemcpyNodeFromSymbol,
1060
+ * ::cudaGraphMemcpyNodeGetParams,
1061
+ * ::cudaGraphMemcpyNodeSetParams,
1062
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1063
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1064
+ * ::cudaGraphCreate,
1065
+ * ::cudaGraphDestroyNode,
1066
+ * ::cudaGraphAddChildGraphNode,
1067
+ * ::cudaGraphAddEmptyNode,
1068
+ * ::cudaGraphAddKernelNode,
1069
+ * ::cudaGraphAddHostNode,
1070
+ * ::cudaGraphAddMemsetNode
1071
+ */
1072
+ template<class T>
1073
+ static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeToSymbol(
1074
+ cudaGraphNode_t *pGraphNode,
1075
+ cudaGraph_t graph,
1076
+ const cudaGraphNode_t *pDependencies,
1077
+ size_t numDependencies,
1078
+ const T &symbol,
1079
+ const void* src,
1080
+ size_t count,
1081
+ size_t offset,
1082
+ enum cudaMemcpyKind kind)
1083
+ {
1084
+ return ::cudaGraphAddMemcpyNodeToSymbol(pGraphNode, graph, pDependencies, numDependencies, (const void*)&symbol, src, count, offset, kind);
1085
+ }
1086
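+
+ /* Usage sketch (editorial example, not part of the original header): building and
+  * running a one-node graph that uploads host data to a __constant__ symbol via the
+  * C++ overload above. All names are hypothetical and error checking is omitted.
+  */
+ static __constant__ float exampleGraphCoeffs[4];
+
+ static __inline__ __host__ void exampleGraphToSymbol(const float *hostSrc, cudaStream_t stream)
+ {
+     cudaGraph_t graph;
+     cudaGraphNode_t copyNode;
+     cudaGraphExec_t graphExec;
+     cudaGraphCreate(&graph, 0);
+     /* no dependencies, so the memcpy node becomes a root node */
+     cudaGraphAddMemcpyNodeToSymbol(&copyNode, graph, NULL, 0,
+                                    exampleGraphCoeffs, hostSrc,
+                                    4 * sizeof(float), 0, cudaMemcpyHostToDevice);
+     cudaGraphInstantiateWithFlags(&graphExec, graph, 0);
+     cudaGraphLaunch(graphExec, stream);
+     cudaStreamSynchronize(stream);
+     cudaGraphExecDestroy(graphExec);
+     cudaGraphDestroy(graph);
+ }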
+
1087
+ /**
1088
+ * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph
1089
+ *
1090
+ * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with
1091
+ * \p numDependencies dependencies specified via \p pDependencies.
1092
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed
1093
+ * at the root of the graph. \p pDependencies may not have any duplicate entries.
1094
+ * A handle to the new node will be returned in \p pGraphNode.
1095
+ *
1096
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1097
+ * \p offset bytes from the start of symbol \p symbol to the memory area
1098
+ * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable
1099
+ * that resides in global or constant memory space. \p kind can be either
1100
+ * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1101
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer
1102
+ * is inferred from the pointer values. However, ::cudaMemcpyDefault is only
1103
+ * allowed on systems that support unified virtual addressing.
1104
+ *
1105
+ * Memcpy nodes have some additional restrictions with regards to managed memory, if the
1106
+ * system contains at least one device which has a zero value for the device attribute
1107
+ * ::cudaDevAttrConcurrentManagedAccess.
1108
+ *
1109
+ * \param pGraphNode - Returns newly created node
1110
+ * \param graph - Graph to which to add the node
1111
+ * \param pDependencies - Dependencies of the node
1112
+ * \param numDependencies - Number of dependencies
1113
+ * \param dst - Destination memory address
1114
+ * \param symbol - Device symbol address
1115
+ * \param count - Size in bytes to copy
1116
+ * \param offset - Offset from start of symbol in bytes
1117
+ * \param kind - Type of transfer
1118
+ *
1119
+ * \return
1120
+ * ::cudaSuccess,
1121
+ * ::cudaErrorInvalidValue
1122
+ * \note_graph_thread_safety
1123
+ * \notefnerr
1124
+ * \note_init_rt
1125
+ * \note_callback
1126
+ *
1127
+ * \sa
1128
+ * ::cudaMemcpyFromSymbol,
1129
+ * ::cudaGraphAddMemcpyNode,
1130
+ * ::cudaGraphAddMemcpyNodeToSymbol,
1131
+ * ::cudaGraphMemcpyNodeGetParams,
1132
+ * ::cudaGraphMemcpyNodeSetParams,
1133
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1134
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1135
+ * ::cudaGraphCreate,
1136
+ * ::cudaGraphDestroyNode,
1137
+ * ::cudaGraphAddChildGraphNode,
1138
+ * ::cudaGraphAddEmptyNode,
1139
+ * ::cudaGraphAddKernelNode,
1140
+ * ::cudaGraphAddHostNode,
1141
+ * ::cudaGraphAddMemsetNode
1142
+ */
1143
+ template<class T>
1144
+ static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeFromSymbol(
1145
+ cudaGraphNode_t* pGraphNode,
1146
+ cudaGraph_t graph,
1147
+ const cudaGraphNode_t* pDependencies,
1148
+ size_t numDependencies,
1149
+ void* dst,
1150
+ const T &symbol,
1151
+ size_t count,
1152
+ size_t offset,
1153
+ enum cudaMemcpyKind kind)
1154
+ {
1155
+ return ::cudaGraphAddMemcpyNodeFromSymbol(pGraphNode, graph, pDependencies, numDependencies, dst, (const void*)&symbol, count, offset, kind);
1156
+ }
1157
+
1158
+ /**
1159
+ * \brief Sets a memcpy node's parameters to copy to a symbol on the device
1160
+ *
1161
+ * Sets the parameters of memcpy node \p node to the copy described by the provided parameters.
1162
+ *
1163
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1164
+ * pointed to by \p src to the memory area \p offset bytes from the start
1165
+ * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that
1166
+ * resides in global or constant memory space. \p kind can be either
1167
+ * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1168
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of
1169
+ * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault
1170
+ * is only allowed on systems that support unified virtual addressing.
1171
+ *
1172
+ * \param node - Node to set the parameters for
1173
+ * \param symbol - Device symbol address
1174
+ * \param src - Source memory address
1175
+ * \param count - Size in bytes to copy
1176
+ * \param offset - Offset from start of symbol in bytes
1177
+ * \param kind - Type of transfer
1178
+ *
1179
+ * \return
1180
+ * ::cudaSuccess,
1181
+ * ::cudaErrorInvalidValue
1182
+ * \note_graph_thread_safety
1183
+ * \notefnerr
1184
+ * \note_init_rt
1185
+ * \note_callback
1186
+ *
1187
+ * \sa
1188
+ * ::cudaMemcpyToSymbol,
1189
+ * ::cudaGraphMemcpyNodeSetParams,
1190
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1191
+ * ::cudaGraphAddMemcpyNode,
1192
+ * ::cudaGraphMemcpyNodeGetParams
1193
+ */
1194
+ template<class T>
1195
+ static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsToSymbol(
1196
+ cudaGraphNode_t node,
1197
+ const T &symbol,
1198
+ const void* src,
1199
+ size_t count,
1200
+ size_t offset,
1201
+ enum cudaMemcpyKind kind)
1202
+ {
1203
+ return ::cudaGraphMemcpyNodeSetParamsToSymbol(node, (const void*)&symbol, src, count, offset, kind);
1204
+ }
1205
+
1206
+ /**
1207
+ * \brief Sets a memcpy node's parameters to copy from a symbol on the device
1208
+ *
1209
+ * Sets the parameters of memcpy node \p node to the copy described by the provided parameters.
1210
+ *
1211
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1212
+ * \p offset bytes from the start of symbol \p symbol to the memory area
1213
+ * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable
1214
+ * that resides in global or constant memory space. \p kind can be either
1215
+ * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1216
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer
1217
+ * is inferred from the pointer values. However, ::cudaMemcpyDefault is only
1218
+ * allowed on systems that support unified virtual addressing.
1219
+ *
1220
+ * \param node - Node to set the parameters for
1221
+ * \param dst - Destination memory address
1222
+ * \param symbol - Device symbol address
1223
+ * \param count - Size in bytes to copy
1224
+ * \param offset - Offset from start of symbol in bytes
1225
+ * \param kind - Type of transfer
1226
+ *
1227
+ * \return
1228
+ * ::cudaSuccess,
1229
+ * ::cudaErrorInvalidValue
1230
+ * \note_graph_thread_safety
1231
+ * \notefnerr
1232
+ * \note_init_rt
1233
+ * \note_callback
1234
+ *
1235
+ * \sa
1236
+ * ::cudaMemcpyFromSymbol,
1237
+ * ::cudaGraphMemcpyNodeSetParams,
1238
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1239
+ * ::cudaGraphAddMemcpyNode,
1240
+ * ::cudaGraphMemcpyNodeGetParams
1241
+ */
1242
+ template<class T>
1243
+ static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsFromSymbol(
1244
+ cudaGraphNode_t node,
1245
+ void* dst,
1246
+ const T &symbol,
1247
+ size_t count,
1248
+ size_t offset,
1249
+ enum cudaMemcpyKind kind)
1250
+ {
1251
+ return ::cudaGraphMemcpyNodeSetParamsFromSymbol(node, dst, (const void*)&symbol, count, offset, kind);
1252
+ }
1253
+
1254
+ /**
1255
+ * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device
1256
+ *
1257
+ * Updates the work represented by \p node in \p hGraphExec as though \p node had
1258
+ * contained the given params at instantiation. \p node must remain in the graph which was
1259
+ * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored.
1260
+ *
1261
+ * \p src and \p symbol must be allocated from the same contexts as the original source and
1262
+ * destination memory. The instantiation-time memory operands must be 1-dimensional.
1263
+ * Zero-length operations are not supported.
1264
+ *
1265
+ * The modifications only affect future launches of \p hGraphExec. Already enqueued
1266
+ * or running launches of \p hGraphExec are not affected by this call. \p node is also
1267
+ * not modified by this call.
1268
+ *
1269
+ * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or
1270
+ * the original memory operands are multidimensional.
1271
+ *
1272
+ * \param hGraphExec - The executable graph in which to set the specified node
1273
+ * \param node - Memcpy node from the graph which was used to instantiate graphExec
1274
+ * \param symbol - Device symbol address
1275
+ * \param src - Source memory address
1276
+ * \param count - Size in bytes to copy
1277
+ * \param offset - Offset from start of symbol in bytes
1278
+ * \param kind - Type of transfer
1279
+ *
1280
+ * \return
1281
+ * ::cudaSuccess,
1282
+ * ::cudaErrorInvalidValue
1283
+ * \note_graph_thread_safety
1284
+ * \notefnerr
1285
+ * \note_init_rt
1286
+ * \note_callback
1287
+ *
1288
+ * \sa
1289
+ * ::cudaGraphAddMemcpyNode,
1290
+ * ::cudaGraphAddMemcpyNodeToSymbol,
1291
+ * ::cudaGraphMemcpyNodeSetParams,
1292
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1293
+ * ::cudaGraphInstantiate,
1294
+ * ::cudaGraphExecMemcpyNodeSetParams,
1295
+ * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol,
1296
+ * ::cudaGraphExecKernelNodeSetParams,
1297
+ * ::cudaGraphExecMemsetNodeSetParams,
1298
+ * ::cudaGraphExecHostNodeSetParams
1299
+ */
1300
+ template<class T>
1301
+ static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsToSymbol(
1302
+ cudaGraphExec_t hGraphExec,
1303
+ cudaGraphNode_t node,
1304
+ const T &symbol,
1305
+ const void* src,
1306
+ size_t count,
1307
+ size_t offset,
1308
+ enum cudaMemcpyKind kind)
1309
+ {
1310
+ return ::cudaGraphExecMemcpyNodeSetParamsToSymbol(hGraphExec, node, (const void*)&symbol, src, count, offset, kind);
1311
+ }
1312
+
1313
+ /**
1314
+ * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device
1315
+ *
1316
+ * Updates the work represented by \p node in \p hGraphExec as though \p node had
1317
+ * contained the given params at instantiation. \p node must remain in the graph which was
1318
+ * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored.
1319
+ *
1320
+ * \p symbol and \p dst must be allocated from the same contexts as the original source and
1321
+ * destination memory. The instantiation-time memory operands must be 1-dimensional.
1322
+ * Zero-length operations are not supported.
1323
+ *
1324
+ * The modifications only affect future launches of \p hGraphExec. Already enqueued
1325
+ * or running launches of \p hGraphExec are not affected by this call. \p node is also
1326
+ * not modified by this call.
1327
+ *
1328
+ * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or
1329
+ * the original memory operands are multidimensional.
1330
+ *
1331
+ * \param hGraphExec - The executable graph in which to set the specified node
1332
+ * \param node - Memcpy node from the graph which was used to instantiate graphExec
1333
+ * \param dst - Destination memory address
1334
+ * \param symbol - Device symbol address
1335
+ * \param count - Size in bytes to copy
1336
+ * \param offset - Offset from start of symbol in bytes
1337
+ * \param kind - Type of transfer
1338
+ *
1339
+ * \return
1340
+ * ::cudaSuccess,
1341
+ * ::cudaErrorInvalidValue
1342
+ * \note_graph_thread_safety
1343
+ * \notefnerr
1344
+ * \note_init_rt
1345
+ * \note_callback
1346
+ *
1347
+ * \sa
1348
+ * ::cudaGraphAddMemcpyNode,
1349
+ * ::cudaGraphAddMemcpyNodeFromSymbol,
1350
+ * ::cudaGraphMemcpyNodeSetParams,
1351
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1352
+ * ::cudaGraphInstantiate,
1353
+ * ::cudaGraphExecMemcpyNodeSetParams,
1354
+ * ::cudaGraphExecMemcpyNodeSetParamsToSymbol,
1355
+ * ::cudaGraphExecKernelNodeSetParams,
1356
+ * ::cudaGraphExecMemsetNodeSetParams,
1357
+ * ::cudaGraphExecHostNodeSetParams
1358
+ */
1359
+ template<class T>
1360
+ static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsFromSymbol(
1361
+ cudaGraphExec_t hGraphExec,
1362
+ cudaGraphNode_t node,
1363
+ void* dst,
1364
+ const T &symbol,
1365
+ size_t count,
1366
+ size_t offset,
1367
+ enum cudaMemcpyKind kind)
1368
+ {
1369
+ return ::cudaGraphExecMemcpyNodeSetParamsFromSymbol(hGraphExec, node, dst, (const void*)&symbol, count, offset, kind);
1370
+ }
1371
+
1372
+ // convenience function to avoid source breakage in c++ code
1373
+ static __inline__ __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out)
1374
+ {
1375
+ cudaGraphExecUpdateResultInfo resultInfo;
1376
+ cudaError_t status = cudaGraphExecUpdate(hGraphExec, hGraph, &resultInfo);
1377
+ if (hErrorNode_out) {
1378
+ *hErrorNode_out = resultInfo.errorNode;
1379
+ }
1380
+ if (updateResult_out) {
1381
+ *updateResult_out = resultInfo.result;
1382
+ }
1383
+ return status;
1384
+ }
1385
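+
+ /* Usage sketch (editorial example, not part of the original header): applying a
+  * modified graph to an existing executable graph through the legacy-style wrapper
+  * above. Names are hypothetical and the fallback path is only indicated.
+  */
+ static __inline__ __host__ cudaError_t exampleTryGraphUpdate(cudaGraphExec_t graphExec, cudaGraph_t modifiedGraph)
+ {
+     cudaGraphNode_t errorNode = NULL;
+     enum cudaGraphExecUpdateResult updateResult;
+     cudaError_t status = cudaGraphExecUpdate(graphExec, modifiedGraph, &errorNode, &updateResult);
+     /* on failure, updateResult and errorNode describe why the update was rejected;
+        the caller would typically re-instantiate modifiedGraph in that case */
+     return status;
+ }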
+
1386
+ #if __cplusplus >= 201103
1387
+
1388
+ /**
1389
+ * \brief Creates a user object by wrapping a C++ object
1390
+ *
1391
+ * TODO detail
1392
+ *
1393
+ * \param object_out - Location to return the user object handle
1394
+ * \param objectToWrap - This becomes the \p ptr argument to ::cudaUserObjectCreate. A
1395
+ * lambda will be passed for the \p destroy argument, which calls
1396
+ * delete on this object pointer.
1397
+ * \param initialRefcount - The initial refcount to create the object with, typically 1. The
1398
+ * initial references are owned by the calling thread.
1399
+ * \param flags - Currently it is required to pass cudaUserObjectNoDestructorSync,
1400
+ * which is the only defined flag. This indicates that the destroy
1401
+ * callback cannot be waited on by any CUDA API. Users requiring
1402
+ * synchronization of the callback should signal its completion
1403
+ * manually.
1404
+ *
1405
+ * \return
1406
+ * ::cudaSuccess,
1407
+ * ::cudaErrorInvalidValue
1408
+ *
1409
+ * \sa
1410
+ * ::cudaUserObjectCreate
1411
+ */
1412
+ template<class T>
1413
+ static __inline__ __host__ cudaError_t cudaUserObjectCreate(
1414
+ cudaUserObject_t *object_out,
1415
+ T *objectToWrap,
1416
+ unsigned int initialRefcount,
1417
+ unsigned int flags)
1418
+ {
1419
+ return ::cudaUserObjectCreate(
1420
+ object_out,
1421
+ objectToWrap,
1422
+ [](void *vpObj) { delete reinterpret_cast<T *>(vpObj); },
1423
+ initialRefcount,
1424
+ flags);
1425
+ }
1426
+
1427
+ template<class T>
1428
+ static __inline__ __host__ cudaError_t cudaUserObjectCreate(
1429
+ cudaUserObject_t *object_out,
1430
+ T *objectToWrap,
1431
+ unsigned int initialRefcount,
1432
+ cudaUserObjectFlags flags)
1433
+ {
1434
+ return cudaUserObjectCreate(object_out, objectToWrap, initialRefcount, (unsigned int)flags);
1435
+ }
1436
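+
+ /* Usage sketch (editorial example, not part of the original header): wrapping a
+  * heap-allocated object in a user object via the helper above (which installs a
+  * deleting lambda) and handing the reference to a graph. Names are hypothetical.
+  */
+ struct ExampleGraphState { int value; };
+
+ static __inline__ __host__ cudaError_t exampleAttachStateToGraph(cudaGraph_t graph)
+ {
+     ExampleGraphState *state = new ExampleGraphState();
+     cudaUserObject_t obj;
+     cudaError_t err = cudaUserObjectCreate(&obj, state, 1, cudaUserObjectNoDestructorSync);
+     if (err != cudaSuccess) {
+         delete state;
+         return err;
+     }
+     /* move the reference into the graph; 'state' is deleted when the graph
+        releases the last reference to the user object */
+     return cudaGraphRetainUserObject(graph, obj, 1, cudaGraphUserObjectMove);
+ }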
+
1437
+ #endif
1438
+
1439
+ /**
1440
+ * \brief \hl Finds the address associated with a CUDA symbol
1441
+ *
1442
+ * Returns in \p *devPtr the address of symbol \p symbol on the device.
1443
+ * \p symbol can either be a variable that resides in global or constant memory space.
1444
+ * If \p symbol cannot be found, or if \p symbol is not declared
1445
+ * in the global or constant memory space, \p *devPtr is unchanged and the error
1446
+ * ::cudaErrorInvalidSymbol is returned.
1447
+ *
1448
+ * \param devPtr - Return device pointer associated with symbol
1449
+ * \param symbol - Device symbol reference
1450
+ *
1451
+ * \return
1452
+ * ::cudaSuccess,
1453
+ * ::cudaErrorInvalidSymbol,
1454
+ * ::cudaErrorNoKernelImageForDevice
1455
+ * \notefnerr
1456
+ * \note_init_rt
1457
+ * \note_callback
1458
+ *
1459
+ * \sa \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)",
1460
+ * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)"
1461
+ */
1462
+ template<class T>
1463
+ static __inline__ __host__ cudaError_t cudaGetSymbolAddress(
1464
+ void **devPtr,
1465
+ const T &symbol
1466
+ )
1467
+ {
1468
+ return ::cudaGetSymbolAddress(devPtr, (const void*)&symbol);
1469
+ }
1470
+
1471
+ /**
1472
+ * \brief \hl Finds the size of the object associated with a CUDA symbol
1473
+ *
1474
+ * Returns in \p *size the size of symbol \p symbol. \p symbol must be a
1475
+ * variable that resides in global or constant memory space.
1476
+ * If \p symbol cannot be found, or if \p symbol is not declared
1477
+ * in global or constant memory space, \p *size is unchanged and the error
1478
+ * ::cudaErrorInvalidSymbol is returned.
1479
+ *
1480
+ * \param size - Size of object associated with symbol
1481
+ * \param symbol - Device symbol reference
1482
+ *
1483
+ * \return
1484
+ * ::cudaSuccess,
1485
+ * ::cudaErrorInvalidSymbol,
1486
+ * ::cudaErrorNoKernelImageForDevice
1487
+ * \notefnerr
1488
+ * \note_init_rt
1489
+ * \note_callback
1490
+ *
1491
+ * \sa \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)",
1492
+ * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)"
1493
+ */
1494
+ template<class T>
1495
+ static __inline__ __host__ cudaError_t cudaGetSymbolSize(
1496
+ size_t *size,
1497
+ const T &symbol
1498
+ )
1499
+ {
1500
+ return ::cudaGetSymbolSize(size, (const void*)&symbol);
1501
+ }
1502
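+
+ /* Usage sketch (editorial example, not part of the original header): resolving the
+  * device address and size of a __device__ symbol with the C++ overloads above so
+  * it can be passed to pointer-based APIs. exampleTable is a hypothetical symbol.
+  */
+ static __device__ float exampleTable[32];
+
+ static __inline__ __host__ cudaError_t exampleClearTable(void)
+ {
+     void *addr = 0;
+     size_t bytes = 0;
+     cudaError_t err = cudaGetSymbolAddress(&addr, exampleTable);
+     if (err == cudaSuccess) {
+         err = cudaGetSymbolSize(&bytes, exampleTable);
+     }
+     if (err == cudaSuccess) {
+         err = cudaMemset(addr, 0, bytes);   /* symbol memory behaves like ordinary device memory */
+     }
+     return err;
+ }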
+
1503
+ /**
1504
+ * \brief \hl Sets the preferred cache configuration for a device function
1505
+ *
1506
+ * On devices where the L1 cache and shared memory use the same hardware
1507
+ * resources, this sets through \p cacheConfig the preferred cache configuration
1508
+ * for the function specified via \p func. This is only a preference. The
1509
+ * runtime will use the requested configuration if possible, but it is free to
1510
+ * choose a different configuration if required to execute \p func.
1511
+ *
1512
+ * \p func must be a pointer to a function that executes on the device.
1513
+ * The parameter specified by \p func must be declared as a \p __global__
1514
+ * function. If the specified function does not exist,
1515
+ * then ::cudaErrorInvalidDeviceFunction is returned.
1516
+ *
1517
+ * This setting does nothing on devices where the size of the L1 cache and
1518
+ * shared memory are fixed.
1519
+ *
1520
+ * Launching a kernel with a different preference than the most recent
1521
+ * preference setting may insert a device-side synchronization point.
1522
+ *
1523
+ * The supported cache configurations are:
1524
+ * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)
1525
+ * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
1526
+ * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
1527
+ *
1528
+ * \param func - device function pointer
1529
+ * \param cacheConfig - Requested cache configuration
1530
+ *
1531
+ * \return
1532
+ * ::cudaSuccess,
1533
+ * ::cudaErrorInvalidDeviceFunction
1534
+ * \notefnerr
1535
+ * \note_init_rt
1536
+ * \note_callback
1537
+ *
1538
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
1539
+ * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)",
1540
+ * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)",
1541
+ * ::cudaSetDoubleForDevice,
1542
+ * ::cudaSetDoubleForHost,
1543
+ * ::cudaThreadGetCacheConfig,
1544
+ * ::cudaThreadSetCacheConfig
1545
+ */
1546
+ template<class T>
1547
+ static __inline__ __host__ cudaError_t cudaFuncSetCacheConfig(
1548
+ T *func,
1549
+ enum cudaFuncCache cacheConfig
1550
+ )
1551
+ {
1552
+ return ::cudaFuncSetCacheConfig((const void*)func, cacheConfig);
1553
+ }
1554
+
1555
+ template<class T>
1556
+ static __inline__ __host__ cudaError_t cudaFuncSetSharedMemConfig(
1557
+ T *func,
1558
+ enum cudaSharedMemConfig config
1559
+ )
1560
+ {
1561
+ return ::cudaFuncSetSharedMemConfig((const void*)func, config);
1562
+ }
1563
+
1564
+ #endif // __CUDACC__
1565
+
1566
+ /**
1567
+ * \brief Returns occupancy for a device function
1568
+ *
1569
+ * Returns in \p *numBlocks the maximum number of active blocks per
1570
+ * streaming multiprocessor for the device function.
1571
+ *
1572
+ * \param numBlocks - Returned occupancy
1573
+ * \param func - Kernel function for which occupancy is calculated
1574
+ * \param blockSize - Block size the kernel is intended to be launched with
1575
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
1576
+ *
1577
+ * \return
1578
+ * ::cudaSuccess,
1579
+ * ::cudaErrorInvalidDevice,
1580
+ * ::cudaErrorInvalidDeviceFunction,
1581
+ * ::cudaErrorInvalidValue,
1582
+ * ::cudaErrorUnknown,
1583
+ * \notefnerr
1584
+ * \note_init_rt
1585
+ * \note_callback
1586
+ *
1587
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1588
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1589
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1590
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1591
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1592
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1593
+ */
1594
+ template<class T>
1595
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor(
1596
+ int *numBlocks,
1597
+ T func,
1598
+ int blockSize,
1599
+ size_t dynamicSMemSize)
1600
+ {
1601
+ return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, cudaOccupancyDefault);
1602
+ }
1603
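+
+ /* Usage sketch (editorial example, not part of the original header): querying how
+  * many blocks of a kernel fit on one multiprocessor and turning that into a rough
+  * theoretical occupancy figure. The kernel is supplied by the caller; names are
+  * hypothetical and dynamic shared memory is assumed to be zero.
+  */
+ template<class Kernel>
+ static __inline__ __host__ cudaError_t exampleReportOccupancy(Kernel kernel, int blockSize, float *occupancyOut)
+ {
+     int device = 0, numBlocks = 0, maxThreadsPerSM = 0;
+     cudaError_t err = cudaGetDevice(&device);
+     if (err != cudaSuccess) return err;
+     err = cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel, blockSize, 0);
+     if (err != cudaSuccess) return err;
+     err = cudaDeviceGetAttribute(&maxThreadsPerSM, cudaDevAttrMaxThreadsPerMultiProcessor, device);
+     if (err != cudaSuccess) return err;
+     /* fraction of the SM's thread capacity occupied by the resident blocks */
+     *occupancyOut = (float)(numBlocks * blockSize) / (float)maxThreadsPerSM;
+     return cudaSuccess;
+ }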
+
1604
+ /**
1605
+ * \brief Returns occupancy for a device function with the specified flags
1606
+ *
1607
+ * Returns in \p *numBlocks the maximum number of active blocks per
1608
+ * streaming multiprocessor for the device function.
1609
+ *
1610
+ * The \p flags parameter controls how special cases are handled. Valid flags include:
1611
+ *
1612
+ * - ::cudaOccupancyDefault: keeps the default behavior as
1613
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1614
+ *
1615
+ * - ::cudaOccupancyDisableCachingOverride: suppresses the default behavior
1616
+ * on platforms where global caching affects occupancy. On such platforms, if caching
1617
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
1618
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
1619
+ * Setting this flag makes the occupancy calculator return 0 in such cases.
1620
+ * More information can be found about this feature in the "Unified L1/Texture Cache"
1621
+ * section of the Maxwell tuning guide.
1622
+ *
1623
+ * \param numBlocks - Returned occupancy
1624
+ * \param func - Kernel function for which occupancy is calculated
1625
+ * \param blockSize - Block size the kernel is intended to be launched with
1626
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
1627
+ * \param flags - Requested behavior for the occupancy calculator
1628
+ *
1629
+ * \return
1630
+ * ::cudaSuccess,
1631
+ * ::cudaErrorInvalidDevice,
1632
+ * ::cudaErrorInvalidDeviceFunction,
1633
+ * ::cudaErrorInvalidValue,
1634
+ * ::cudaErrorUnknown,
1635
+ * \notefnerr
1636
+ * \note_init_rt
1637
+ * \note_callback
1638
+ *
1639
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1640
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1641
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1642
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1643
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1644
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1645
+ */
1646
+ template<class T>
1647
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
1648
+ int *numBlocks,
1649
+ T func,
1650
+ int blockSize,
1651
+ size_t dynamicSMemSize,
1652
+ unsigned int flags)
1653
+ {
1654
+ return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, flags);
1655
+ }
1656
+
1657
+ /**
1658
+ * Helper functor for cudaOccupancyMaxPotentialBlockSize
1659
+ */
1660
+ class __cudaOccupancyB2DHelper {
1661
+ size_t n;
1662
+ public:
1663
+ inline __host__ CUDART_DEVICE __cudaOccupancyB2DHelper(size_t n_) : n(n_) {}
1664
+ inline __host__ CUDART_DEVICE size_t operator()(int)
1665
+ {
1666
+ return n;
1667
+ }
1668
+ };
1669
+
1670
+ /**
1671
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function
1672
+ *
1673
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
1674
+ * block size pair that achieves the best potential occupancy
1675
+ * (i.e. the maximum number of active warps with the smallest number
1676
+ * of blocks).
1677
+ *
1678
+ * The \p flags parameter controls how special cases are handled. Valid flags include:
1679
+ *
1680
+ * - ::cudaOccupancyDefault: keeps the default behavior as
1681
+ * ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1682
+ *
1683
+ * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior
1684
+ * on platforms where global caching affects occupancy. On such platforms, if caching
1685
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
1686
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
1687
+ * Setting this flag makes the occupancy calculator return 0 in such cases.
1688
+ * More information can be found about this feature in the "Unified L1/Texture Cache"
1689
+ * section of the Maxwell tuning guide.
1690
+ *
1691
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
1692
+ * \param blockSize - Returned block size
1693
+ * \param func - Device function symbol
1694
+ * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block
1695
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
1696
+ * \param flags - Requested behavior for the occupancy calculator
1697
+ *
1698
+ * \return
1699
+ * ::cudaSuccess,
1700
+ * ::cudaErrorInvalidDevice,
1701
+ * ::cudaErrorInvalidDeviceFunction,
1702
+ * ::cudaErrorInvalidValue,
1703
+ * ::cudaErrorUnknown,
1704
+ * \notefnerr
1705
+ * \note_init_rt
1706
+ * \note_callback
1707
+ *
1708
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1709
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1710
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1711
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1712
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1713
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1714
+ */
1715
+
1716
+ template<typename UnaryFunction, class T>
1717
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(
1718
+ int *minGridSize,
1719
+ int *blockSize,
1720
+ T func,
1721
+ UnaryFunction blockSizeToDynamicSMemSize,
1722
+ int blockSizeLimit = 0,
1723
+ unsigned int flags = 0)
1724
+ {
1725
+ cudaError_t status;
1726
+
1727
+ // Device and function properties
1728
+ int device;
1729
+ struct cudaFuncAttributes attr;
1730
+
1731
+ // Limits
1732
+ int maxThreadsPerMultiProcessor;
1733
+ int warpSize;
1734
+ int devMaxThreadsPerBlock;
1735
+ int multiProcessorCount;
1736
+ int funcMaxThreadsPerBlock;
1737
+ int occupancyLimit;
1738
+ int granularity;
1739
+
1740
+ // Recorded maximum
1741
+ int maxBlockSize = 0;
1742
+ int numBlocks = 0;
1743
+ int maxOccupancy = 0;
1744
+
1745
+ // Temporary
1746
+ int blockSizeToTryAligned;
1747
+ int blockSizeToTry;
1748
+ int blockSizeLimitAligned;
1749
+ int occupancyInBlocks;
1750
+ int occupancyInThreads;
1751
+ size_t dynamicSMemSize;
1752
+
1753
+ ///////////////////////////
1754
+ // Check user input
1755
+ ///////////////////////////
1756
+
1757
+ if (!minGridSize || !blockSize || !func) {
1758
+ return cudaErrorInvalidValue;
1759
+ }
1760
+
1761
+ //////////////////////////////////////////////
1762
+ // Obtain device and function properties
1763
+ //////////////////////////////////////////////
1764
+
1765
+ status = ::cudaGetDevice(&device);
1766
+ if (status != cudaSuccess) {
1767
+ return status;
1768
+ }
1769
+
1770
+ status = cudaDeviceGetAttribute(
1771
+ &maxThreadsPerMultiProcessor,
1772
+ cudaDevAttrMaxThreadsPerMultiProcessor,
1773
+ device);
1774
+ if (status != cudaSuccess) {
1775
+ return status;
1776
+ }
1777
+
1778
+ status = cudaDeviceGetAttribute(
1779
+ &warpSize,
1780
+ cudaDevAttrWarpSize,
1781
+ device);
1782
+ if (status != cudaSuccess) {
1783
+ return status;
1784
+ }
1785
+
1786
+ status = cudaDeviceGetAttribute(
1787
+ &devMaxThreadsPerBlock,
1788
+ cudaDevAttrMaxThreadsPerBlock,
1789
+ device);
1790
+ if (status != cudaSuccess) {
1791
+ return status;
1792
+ }
1793
+
1794
+ status = cudaDeviceGetAttribute(
1795
+ &multiProcessorCount,
1796
+ cudaDevAttrMultiProcessorCount,
1797
+ device);
1798
+ if (status != cudaSuccess) {
1799
+ return status;
1800
+ }
1801
+
1802
+ status = cudaFuncGetAttributes(&attr, func);
1803
+ if (status != cudaSuccess) {
1804
+ return status;
1805
+ }
1806
+
1807
+ funcMaxThreadsPerBlock = attr.maxThreadsPerBlock;
1808
+
1809
+ /////////////////////////////////////////////////////////////////////////////////
1810
+ // Try each block size, and pick the block size with maximum occupancy
1811
+ /////////////////////////////////////////////////////////////////////////////////
1812
+
1813
+ occupancyLimit = maxThreadsPerMultiProcessor;
1814
+ granularity = warpSize;
1815
+
1816
+ if (blockSizeLimit == 0) {
1817
+ blockSizeLimit = devMaxThreadsPerBlock;
1818
+ }
1819
+
1820
+ if (devMaxThreadsPerBlock < blockSizeLimit) {
1821
+ blockSizeLimit = devMaxThreadsPerBlock;
1822
+ }
1823
+
1824
+ if (funcMaxThreadsPerBlock < blockSizeLimit) {
1825
+ blockSizeLimit = funcMaxThreadsPerBlock;
1826
+ }
1827
+
1828
+ blockSizeLimitAligned = ((blockSizeLimit + (granularity - 1)) / granularity) * granularity;
1829
+
1830
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1831
+ // This is needed for the first iteration, because
1832
+ // blockSizeLimitAligned could be greater than blockSizeLimit
1833
+ //
1834
+ if (blockSizeLimit < blockSizeToTryAligned) {
1835
+ blockSizeToTry = blockSizeLimit;
1836
+ } else {
1837
+ blockSizeToTry = blockSizeToTryAligned;
1838
+ }
1839
+
1840
+ dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry);
1841
+
1842
+ status = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
1843
+ &occupancyInBlocks,
1844
+ func,
1845
+ blockSizeToTry,
1846
+ dynamicSMemSize,
1847
+ flags);
1848
+
1849
+ if (status != cudaSuccess) {
1850
+ return status;
1851
+ }
1852
+
1853
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1854
+
1855
+ if (occupancyInThreads > maxOccupancy) {
1856
+ maxBlockSize = blockSizeToTry;
1857
+ numBlocks = occupancyInBlocks;
1858
+ maxOccupancy = occupancyInThreads;
1859
+ }
1860
+
1861
+ // Early out if we have reached the maximum
1862
+ //
1863
+ if (occupancyLimit == maxOccupancy) {
1864
+ break;
1865
+ }
1866
+ }
1867
+
1868
+ ///////////////////////////
1869
+ // Return best available
1870
+ ///////////////////////////
1871
+
1872
+ // Suggested min grid size to achieve a full machine launch
1873
+ //
1874
+ *minGridSize = numBlocks * multiProcessorCount;
1875
+ *blockSize = maxBlockSize;
1876
+
1877
+ return status;
1878
+ }
1879
+
1880
+ /**
1881
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function
1882
+ *
1883
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
1884
+ * block size pair that achieves the best potential occupancy
1885
+ * (i.e. the maximum number of active warps with the smallest number
1886
+ * of blocks).
1887
+ *
1888
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
1889
+ * \param blockSize - Returned block size
1890
+ * \param func - Device function symbol
1891
+ * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block
1892
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
1893
+ *
1894
+ * \return
1895
+ * ::cudaSuccess,
1896
+ * ::cudaErrorInvalidDevice,
1897
+ * ::cudaErrorInvalidDeviceFunction,
1898
+ * ::cudaErrorInvalidValue,
1899
+ * ::cudaErrorUnknown,
1900
+ * \notefnerr
1901
+ * \note_init_rt
1902
+ * \note_callback
1903
+ *
1904
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1905
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1906
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1907
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1908
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1909
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1910
+ */
1911
+
1912
+ template<typename UnaryFunction, class T>
1913
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem(
1914
+ int *minGridSize,
1915
+ int *blockSize,
1916
+ T func,
1917
+ UnaryFunction blockSizeToDynamicSMemSize,
1918
+ int blockSizeLimit = 0)
1919
+ {
1920
+ return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, blockSizeLimit, cudaOccupancyDefault);
1921
+ }
1922
+
1923
+ /**
1924
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function
1925
+ *
1926
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
1927
+ * block size pair that achieves the best potential occupancy
1928
+ * (i.e. the maximum number of active warps with the smallest number
1929
+ * of blocks).
1930
+ *
1931
+ * Use ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the
1932
+ * amount of per-block dynamic shared memory changes with different
1933
+ * block sizes.
1934
+ *
1935
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
1936
+ * \param blockSize - Returned block size
1937
+ * \param func - Device function symbol
1938
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
1939
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
1940
+ *
1941
+ * \return
1942
+ * ::cudaSuccess,
1943
+ * ::cudaErrorInvalidDevice,
1944
+ * ::cudaErrorInvalidDeviceFunction,
1945
+ * ::cudaErrorInvalidValue,
1946
+ * ::cudaErrorUnknown,
1947
+ * \notefnerr
1948
+ * \note_init_rt
1949
+ * \note_callback
1950
+ *
1951
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1952
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1953
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1954
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1955
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1956
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1957
+ */
1958
+ template<class T>
1959
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSize(
1960
+ int *minGridSize,
1961
+ int *blockSize,
1962
+ T func,
1963
+ size_t dynamicSMemSize = 0,
1964
+ int blockSizeLimit = 0)
1965
+ {
1966
+ return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, cudaOccupancyDefault);
1967
+ }
1968
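+
+ /* Usage sketch (editorial example, not part of the original header): picking a launch
+  * configuration with the occupancy-based heuristic above. The kernel is supplied by
+  * the caller, 'elementCount' sizes the grid, and the names are hypothetical.
+  */
+ template<class Kernel>
+ static __inline__ __host__ cudaError_t examplePickLaunchConfig(Kernel kernel, int elementCount,
+                                                                int *gridSizeOut, int *blockSizeOut)
+ {
+     int minGridSize = 0, blockSize = 0;
+     cudaError_t err = cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel);
+     if (err != cudaSuccess) {
+         return err;
+     }
+     /* round the grid up so every element is covered; minGridSize is only the
+        smallest grid that can keep the whole device busy */
+     *blockSizeOut = blockSize;
+     *gridSizeOut = (elementCount + blockSize - 1) / blockSize;
+     return cudaSuccess;
+ }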
+
1969
+ /**
1970
+ * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM.
1971
+ *
1972
+ * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM.
1973
+ *
1974
+ * \param dynamicSmemSize - Returned maximum dynamic shared memory
1975
+ * \param func - Kernel function for which occupancy is calculated
1976
+ * \param numBlocks - Number of blocks to fit on SM
1977
+ * \param blockSize - Size of the block
1978
+ *
1979
+ * \return
1980
+ * ::cudaSuccess,
1981
+ * ::cudaErrorInvalidDevice,
1982
+ * ::cudaErrorInvalidDeviceFunction,
1983
+ * ::cudaErrorInvalidValue,
1984
+ * ::cudaErrorUnknown,
1985
+ * \notefnerr
1986
+ * \note_init_rt
1987
+ * \note_callback
1988
+ *
1989
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1990
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1991
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1992
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1993
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1994
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1995
+ */
1996
+ template<class T>
1997
+ static __inline__ __host__ cudaError_t cudaOccupancyAvailableDynamicSMemPerBlock(
1998
+ size_t *dynamicSmemSize,
1999
+ T func,
2000
+ int numBlocks,
2001
+ int blockSize)
2002
+ {
2003
+ return ::cudaOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, (const void*)func, numBlocks, blockSize);
2004
+ }
2005
+
2006
+ /**
2007
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function with the specified flags
2008
+ *
2009
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
2010
+ * block size pair that achieves the best potential occupancy
2011
+ * (i.e. the maximum number of active warps with the smallest number
2012
+ * of blocks).
2013
+ *
2014
+ * The \p flags parameter controls how special cases are handled. Valid flags include:
2015
+ *
2016
+ * - ::cudaOccupancyDefault: keeps the default behavior as
2017
+ * ::cudaOccupancyMaxPotentialBlockSize
2018
+ *
2019
+ * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior
2020
+ * on platforms where global caching affects occupancy. On such platforms, if caching
2021
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
2022
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
2023
+ * Setting this flag makes the occupancy calculator return 0 in such cases.
2024
+ * More information can be found about this feature in the "Unified L1/Texture Cache"
2025
+ * section of the Maxwell tuning guide.
2026
+ *
2027
+ * Use ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the
2028
+ * amount of per-block dynamic shared memory changes with different
2029
+ * block sizes.
2030
+ *
2031
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
2032
+ * \param blockSize - Returned block size
2033
+ * \param func - Device function symbol
2034
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
2035
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
2036
+ * \param flags - Requested behavior for the occupancy calculator
2037
+ *
2038
+ * \return
2039
+ * ::cudaSuccess,
2040
+ * ::cudaErrorInvalidDevice,
2041
+ * ::cudaErrorInvalidDeviceFunction,
2042
+ * ::cudaErrorInvalidValue,
2043
+ * ::cudaErrorUnknown,
2044
+ * \notefnerr
2045
+ * \note_init_rt
2046
+ * \note_callback
2047
+ *
2048
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
2049
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
2050
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
2051
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
2052
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
2053
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
2054
+ */
2055
+ template<class T>
2056
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags(
2057
+ int *minGridSize,
2058
+ int *blockSize,
2059
+ T func,
2060
+ size_t dynamicSMemSize = 0,
2061
+ int blockSizeLimit = 0,
2062
+ unsigned int flags = 0)
2063
+ {
2064
+ return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, flags);
2065
+ }
2066
+
2067
+ /**
2068
+ * \brief Given the kernel function (\p func) and launch configuration
2069
+ * (\p config), return the maximum cluster size in \p *clusterSize.
2070
+ *
2071
+ * The cluster dimensions in \p config are ignored. If \p func has a required
2072
+ * cluster size set (see ::cudaFuncGetAttributes), \p *clusterSize will reflect
2073
+ * the required cluster size.
2074
+ *
2075
+ * By default this function will always return a value that's portable on
2076
+ * future hardware. A higher value may be returned if the kernel function
2077
+ * allows non-portable cluster sizes.
2078
+ *
2079
+ * This function will respect the compile time launch bounds.
2080
+ *
2081
+ * \param clusterSize - Returned maximum cluster size that can be launched
2082
+ * for the given kernel function and launch configuration
2083
+ * \param func - Kernel function for which maximum cluster
2084
+ * size is calculated
2085
+ * \param config - Launch configuration for the given kernel function
2086
+ *
2087
+ * \return
2088
+ * ::cudaSuccess,
2089
+ * ::cudaErrorInvalidDeviceFunction,
2090
+ * ::cudaErrorInvalidValue,
2091
+ * ::cudaErrorUnknown,
2092
+ * \notefnerr
2093
+ * \note_init_rt
2094
+ * \note_callback
2095
+ *
2096
+ * \sa
2097
+ * ::cudaFuncGetAttributes
2098
+ */
2099
+ template<class T>
2100
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxPotentialClusterSize(
2101
+ int *clusterSize,
2102
+ T *func,
2103
+ const cudaLaunchConfig_t *config)
2104
+ {
2105
+ return ::cudaOccupancyMaxPotentialClusterSize(clusterSize, (const void*)func, config);
2106
+ }
2107
+
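A short sketch of how the wrapper above might be called; clusterKernel and the grid/block dimensions are assumptions:

__global__ void clusterKernel();  // hypothetical kernel built for a cluster-capable architecture

void queryMaxClusterSize()
{
    cudaLaunchConfig_t config = {};   // cluster dimensions in config are ignored by this query
    config.gridDim  = dim3(1024);
    config.blockDim = dim3(128);

    int clusterSize = 0;
    cudaOccupancyMaxPotentialClusterSize(&clusterSize, clusterKernel, &config);
    // clusterSize now holds the largest portable cluster size for this launch shape
}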
2108
+ /**
2109
+ * \brief Given the kernel function (\p func) and launch configuration
2110
+ * (\p config), return the maximum number of clusters that could co-exist
2111
+ * on the target device in \p *numClusters.
2112
+ *
2113
+ * If the function has a required cluster size already set (see
2114
+ * ::cudaFuncGetAttributes), the cluster size from config must either be
2115
+ * unspecified or match the required size.
2116
+ * If no required cluster size is set, the cluster size must be specified in \p config;
2117
+ * otherwise the function will return an error.
2118
+ *
2119
+ * Note that various attributes of the kernel function may affect occupancy
2120
+ * calculation. The runtime environment may affect how the hardware schedules
2121
+ * the clusters, so the calculated occupancy is not guaranteed to be achievable.
2122
+ *
2123
+ * \param numClusters - Returned maximum number of clusters that
2124
+ * could co-exist on the target device
2125
+ * \param func - Kernel function for which maximum number
2126
+ * of clusters are calculated
2127
+ * \param config - Launch configuration for the given kernel function
2128
+ *
2129
+ * \return
2130
+ * ::cudaSuccess,
2131
+ * ::cudaErrorInvalidDeviceFunction,
2132
+ * ::cudaErrorInvalidValue,
2133
+ * ::cudaErrorInvalidClusterSize,
2134
+ * ::cudaErrorUnknown,
2135
+ * \notefnerr
2136
+ * \note_init_rt
2137
+ * \note_callback
2138
+ *
2139
+ * \sa
2140
+ * ::cudaFuncGetAttributes
2141
+ */
2142
+ template<class T>
2143
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveClusters(
2144
+ int *numClusters,
2145
+ T *func,
2146
+ const cudaLaunchConfig_t *config)
2147
+ {
2148
+ return ::cudaOccupancyMaxActiveClusters(numClusters, (const void*)func, config);
2149
+ }
2150
+
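A sketch, under the assumption that the kernel has no compile-time required cluster size, so the cluster dimension is supplied through a launch attribute:

__global__ void clusterKernel();  // hypothetical kernel

void queryActiveClusters()
{
    cudaLaunchConfig_t config = {};
    config.gridDim  = dim3(1024);
    config.blockDim = dim3(128);

    cudaLaunchAttribute attr;                       // request 2x1x1 clusters
    attr.id = cudaLaunchAttributeClusterDimension;
    attr.val.clusterDim.x = 2;
    attr.val.clusterDim.y = 1;
    attr.val.clusterDim.z = 1;
    config.attrs    = &attr;
    config.numAttrs = 1;

    int numClusters = 0;
    cudaOccupancyMaxActiveClusters(&numClusters, clusterKernel, &config);
    // numClusters is the calculated co-resident cluster count, not a guarantee
}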
2151
+ #if defined __CUDACC__
2152
+
2153
+ /**
2154
+ * \brief \hl Find out attributes for a given function
2155
+ *
2156
+ * This function obtains the attributes of a function specified via \p entry.
2157
+ * The parameter \p entry must be a pointer to a function that executes
2158
+ * on the device. The parameter specified by \p entry must be declared as a \p __global__
2159
+ * function. The fetched attributes are placed in \p attr. If the specified
2160
+ * function does not exist, then ::cudaErrorInvalidDeviceFunction is returned.
2161
+ *
2162
+ * Note that some function attributes such as
2163
+ * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock"
2164
+ * may vary based on the device that is currently being used.
2165
+ *
2166
+ * \param attr - Return pointer to function's attributes
2167
+ * \param entry - Function to get attributes of
2168
+ *
2169
+ * \return
2170
+ * ::cudaSuccess,
2171
+ * ::cudaErrorInvalidDeviceFunction
2172
+ * \notefnerr
2173
+ * \note_init_rt
2174
+ * \note_callback
2175
+ *
2176
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
2177
+ * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)",
2178
+ * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)",
2179
+ * ::cudaSetDoubleForDevice,
2180
+ * ::cudaSetDoubleForHost
2181
+ */
2182
+ template<class T>
2183
+ static __inline__ __host__ cudaError_t cudaFuncGetAttributes(
2184
+ struct cudaFuncAttributes *attr,
2185
+ T *entry
2186
+ )
2187
+ {
2188
+ return ::cudaFuncGetAttributes(attr, (const void*)entry);
2189
+ }
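A small illustration of the C++ overload above; myKernel is a placeholder:

#include <cstdio>

__global__ void myKernel();  // hypothetical kernel

void reportKernelAttributes()
{
    cudaFuncAttributes attr;
    if (cudaFuncGetAttributes(&attr, myKernel) == cudaSuccess) {
        printf("maxThreadsPerBlock=%d numRegs=%d sharedSizeBytes=%zu\n",
               attr.maxThreadsPerBlock, attr.numRegs, attr.sharedSizeBytes);
    }
}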
2190
+
2191
+ /**
2192
+ * \brief \hl Set attributes for a given function
2193
+ *
2194
+ * This function sets the attributes of a function specified via \p entry.
2195
+ * The parameter \p entry must be a pointer to a function that executes
2196
+ * on the device. The parameter specified by \p entry must be declared as a \p __global__
2197
+ * function. The enumeration defined by \p attr is set to the value defined by \p value.
2198
+ * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned.
2199
+ * If the specified attribute cannot be written, or if the value is incorrect,
2200
+ * then ::cudaErrorInvalidValue is returned.
2201
+ *
2202
+ * Valid values for \p attr are:
2203
+ * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute ::sharedSizeBytes
2204
+ * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture.
2205
+ * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources,
2206
+ * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor.
2207
+ * This is only a hint, and the driver can choose a different ratio if required to execute the function.
2208
+ * - ::cudaFuncAttributeRequiredClusterWidth: The required cluster width in
2209
+ * blocks. The width, height, and depth values must either all be 0 or all be
2210
+ * positive. The validity of the cluster dimensions is checked at launch time.
2211
+ * If the value is set at compile time, it cannot be set at runtime.
2212
+ * Setting it at runtime will return cudaErrorNotPermitted.
2213
+ * - ::cudaFuncAttributeRequiredClusterHeight: The required cluster height in
2214
+ * blocks. The width, height, and depth values must either all be 0 or all be
2215
+ * positive. The validity of the cluster dimensions is checked at launch time.
2216
+ * If the value is set at compile time, it cannot be set at runtime.
2217
+ * Setting it at runtime will return cudaErrorNotPermitted.
2218
+ * - ::cudaFuncAttributeRequiredClusterDepth: The required cluster depth in
2219
+ * blocks. The width, height, and depth values must either all be 0 or all be
2220
+ * positive. The validity of the cluster dimensions is checked at launch time.
2221
+ * If the value is set at compile time, it cannot be set at runtime.
2222
+ * Setting it at runtime will return cudaErrorNotPermitted.
2223
+ * - ::cudaFuncAttributeClusterSchedulingPolicyPreference: The block
2224
+ * scheduling policy of a function. The value type is cudaClusterSchedulingPolicy.
2225
+ *
2226
+ * \param entry - Function to set attributes of
2227
+ * \param attr - Attribute to set
2228
+ * \param value - Value to set
2229
+ *
2230
+ * \return
2231
+ * ::cudaSuccess,
2232
+ * ::cudaErrorInvalidDeviceFunction,
2233
+ * ::cudaErrorInvalidValue
2234
+ * \notefnerr
2235
+ * \note_init_rt
2236
+ * \note_callback
2237
+ *
2238
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
2239
+ * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)",
2240
+ * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)",
2241
+ * ::cudaSetDoubleForDevice,
2242
+ * ::cudaSetDoubleForHost
2243
+ */
2244
+ template<class T>
2245
+ static __inline__ __host__ cudaError_t cudaFuncSetAttribute(
2246
+ T *entry,
2247
+ enum cudaFuncAttribute attr,
2248
+ int value
2249
+ )
2250
+ {
2251
+ return ::cudaFuncSetAttribute((const void*)entry, attr, value);
2252
+ }
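One common use of the overload above is opting a kernel into more than the default dynamic shared memory; the kernel, the 64 KB figure, and the launch shape are assumptions (the value must stay within the device's opt-in limit):

__global__ void bigSmemKernel(float *data);  // hypothetical kernel using extern __shared__

void launchWithLargeSharedMemory(float *data)
{
    const int smemBytes = 64 * 1024;  // assumed within cudaDevAttrMaxSharedMemoryPerBlockOptin
    cudaFuncSetAttribute(bigSmemKernel,
                         cudaFuncAttributeMaxDynamicSharedMemorySize,
                         smemBytes);
    bigSmemKernel<<<80, 256, smemBytes>>>(data);
}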
2253
+
2254
+ /**
2255
+ * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr
2256
+ *
2257
+ * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr.
2258
+ *
2259
+ * \param kernelPtr - Returns the device kernel
2260
+ * \param entryFuncAddr - Address of device entry function to search kernel for
2261
+ *
2262
+ * \return
2263
+ * ::cudaSuccess
2264
+ *
2265
+ * \sa
2266
+ * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr) "cudaGetKernel (C API)"
2267
+ */
2268
+ template<class T>
2269
+ static __inline__ __host__ cudaError_t cudaGetKernel(
2270
+ cudaKernel_t *kernelPtr,
2271
+ const T *entryFuncAddr
2272
+ )
2273
+ {
2274
+ return ::cudaGetKernel(kernelPtr, (const void *)entryFuncAddr);
2275
+ }
2276
+
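A brief sketch; myKernel is a placeholder:

__global__ void myKernel(int *out);  // hypothetical kernel

void getKernelHandle()
{
    cudaKernel_t handle = NULL;
    cudaGetKernel(&handle, myKernel);
    // handle now identifies the compiled kernel and can be passed to
    // runtime APIs that accept a cudaKernel_t.
}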
2277
+ #endif /* __CUDACC__ */
2278
+
2279
+ /** @} */ /* END CUDART_HIGHLEVEL */
2280
+
2281
+ #endif /* __cplusplus && !__CUDACC_RTC__ */
2282
+
2283
+ #if !defined(__CUDACC_RTC__)
2284
+ #if defined(__GNUC__)
2285
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
2286
+ #pragma GCC diagnostic pop
2287
+ #endif
2288
+ #elif defined(_MSC_VER)
2289
+ #pragma warning(pop)
2290
+ #endif
2291
+ #endif
2292
+
2293
+ #undef __CUDA_DEPRECATED
2294
+
2295
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__)
2296
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
2297
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__
2298
+ #endif
2299
+
2300
+ #endif /* !__CUDA_RUNTIME_H__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h ADDED
@@ -0,0 +1,76 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_SURFACE_TYPES_H__)
51
+ #define __CUDA_SURFACE_TYPES_H__
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #if !defined(__CUDACC_RTC__)
62
+ #define EXCLUDE_FROM_RTC
63
+ #include "channel_descriptor.h"
64
+ #undef EXCLUDE_FROM_RTC
65
+ #endif /* !__CUDACC_RTC__ */
66
+ #include "cuda_runtime_api.h"
67
+
68
+ /*******************************************************************************
69
+ * *
70
+ * *
71
+ * *
72
+ *******************************************************************************/
73
+
74
+ #endif /* __cplusplus && __CUDACC__ */
75
+
76
+ #endif /* !__CUDA_SURFACE_TYPES_H__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h ADDED
@@ -0,0 +1,76 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_TEXTURE_TYPES_H__)
51
+ #define __CUDA_TEXTURE_TYPES_H__
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #if !defined(__CUDACC_RTC__)
62
+ #define EXCLUDE_FROM_RTC
63
+ #include "channel_descriptor.h"
64
+ #undef EXCLUDE_FROM_RTC
65
+ #endif /* !__CUDACC_RTC__ */
66
+ #include "cuda_runtime_api.h"
67
+
68
+ /*******************************************************************************
69
+ * *
70
+ * *
71
+ * *
72
+ *******************************************************************************/
73
+
74
+ #endif /* __cplusplus && __CUDACC__ */
75
+
76
+ #endif /* !__CUDA_TEXTURE_TYPES_H__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h ADDED
@@ -0,0 +1,201 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_VDPAU_INTEROP_H__)
51
+ #define __CUDA_VDPAU_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+
55
+ #include <vdpau/vdpau.h>
56
+
57
+ #if defined(__cplusplus)
58
+ extern "C" {
59
+ #endif /* __cplusplus */
60
+
61
+ /**
62
+ * \addtogroup CUDART_VDPAU VDPAU Interoperability
63
+ * This section describes the VDPAU interoperability functions of the CUDA
64
+ * runtime application programming interface.
65
+ *
66
+ * @{
67
+ */
68
+
69
+ /**
70
+ * \brief Gets the CUDA device associated with a VdpDevice.
71
+ *
72
+ * Returns the CUDA device associated with a VdpDevice, if applicable.
73
+ *
74
+ * \param device - Returns the device associated with vdpDevice, or -1 if
75
+ * the device associated with vdpDevice is not a compute device.
76
+ * \param vdpDevice - A VdpDevice handle
77
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
78
+ *
79
+ * \return
80
+ * ::cudaSuccess
81
+ * \notefnerr
82
+ *
83
+ * \sa
84
+ * ::cudaVDPAUSetVDPAUDevice,
85
+ * ::cuVDPAUGetDevice
86
+ */
87
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUGetDevice(int *device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
88
+
89
+ /**
90
+ * \brief Sets a CUDA device to use VDPAU interoperability
91
+ *
92
+ * Records \p vdpDevice as the VdpDevice for VDPAU interoperability
93
+ * with the CUDA device \p device and sets \p device as the current
94
+ * device for the calling host thread.
95
+ *
96
+ * This function will immediately initialize the primary context on
97
+ * \p device if needed.
98
+ *
99
+ * If \p device has already been initialized then this call will fail
100
+ * with the error ::cudaErrorSetOnActiveProcess. In this case it is
101
+ * necessary to reset \p device using ::cudaDeviceReset() before
102
+ * VDPAU interoperability on \p device may be enabled.
103
+ *
104
+ * \param device - Device to use for VDPAU interoperability
105
+ * \param vdpDevice - The VdpDevice to interoperate with
106
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
107
+ *
108
+ * \return
109
+ * ::cudaSuccess,
110
+ * ::cudaErrorInvalidDevice,
111
+ * ::cudaErrorSetOnActiveProcess
112
+ * \notefnerr
113
+ *
114
+ * \sa ::cudaGraphicsVDPAURegisterVideoSurface,
115
+ * ::cudaGraphicsVDPAURegisterOutputSurface,
116
+ * ::cudaDeviceReset
117
+ */
118
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
119
+
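A sketch of typical device setup using the two entry points above; the VdpDevice and VdpGetProcAddress values are assumed to come from the application's own VDPAU initialization:

void enableVdpauInterop(VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress)
{
    int cudaDev = -1;
    cudaVDPAUGetDevice(&cudaDev, vdpDevice, vdpGetProcAddress);
    if (cudaDev >= 0) {
        /* Must run before the CUDA context on cudaDev is initialized. */
        cudaVDPAUSetVDPAUDevice(cudaDev, vdpDevice, vdpGetProcAddress);
    }
}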
120
+ /**
121
+ * \brief Register a VdpVideoSurface object
122
+ *
123
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by CUDA.
124
+ * A handle to the registered object is returned as \p resource.
125
+ * The surface's intended usage is specified using \p flags, as follows:
126
+ *
127
+ * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
128
+ * resource will be used. It is therefore assumed that this resource will be
129
+ * read from and written to by CUDA. This is the default value.
130
+ * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
131
+ * will not write to this resource.
132
+ * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
133
+ * CUDA will not read from this resource and will write over the
134
+ * entire contents of the resource, so none of the data previously
135
+ * stored in the resource will be preserved.
136
+ *
137
+ * \param resource - Pointer to the returned object handle
138
+ * \param vdpSurface - VDPAU object to be registered
139
+ * \param flags - Map flags
140
+ *
141
+ * \return
142
+ * ::cudaSuccess,
143
+ * ::cudaErrorInvalidDevice,
144
+ * ::cudaErrorInvalidValue,
145
+ * ::cudaErrorInvalidResourceHandle,
146
+ * ::cudaErrorUnknown
147
+ * \notefnerr
148
+ *
149
+ * \sa
150
+ * ::cudaVDPAUSetVDPAUDevice,
151
+ * ::cudaGraphicsUnregisterResource,
152
+ * ::cudaGraphicsSubResourceGetMappedArray,
153
+ * ::cuGraphicsVDPAURegisterVideoSurface
154
+ */
155
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterVideoSurface(struct cudaGraphicsResource **resource, VdpVideoSurface vdpSurface, unsigned int flags);
156
+
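A hedged sketch of the register/map/unmap cycle for a video surface; the surface handle, the read-only usage, and sub-resource index 0 are assumptions:

void readVideoSurface(VdpVideoSurface surf, cudaStream_t stream)
{
    struct cudaGraphicsResource *res = NULL;
    cudaGraphicsVDPAURegisterVideoSurface(&res, surf, cudaGraphicsMapFlagsReadOnly);

    cudaGraphicsMapResources(1, &res, stream);
    cudaArray_t plane = NULL;
    cudaGraphicsSubResourceGetMappedArray(&plane, res, 0, 0);  // first sub-resource, mip level 0
    /* ... access 'plane' through a texture/surface object or cudaMemcpy2DFromArray ... */
    cudaGraphicsUnmapResources(1, &res, stream);
    cudaGraphicsUnregisterResource(res);
}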
157
+ /**
158
+ * \brief Register a VdpOutputSurface object
159
+ *
160
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by CUDA.
161
+ * A handle to the registered object is returned as \p resource.
162
+ * The surface's intended usage is specified using \p flags, as follows:
163
+ *
164
+ * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
165
+ * resource will be used. It is therefore assumed that this resource will be
166
+ * read from and written to by CUDA. This is the default value.
167
+ * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
168
+ * will not write to this resource.
169
+ * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
170
+ * CUDA will not read from this resource and will write over the
171
+ * entire contents of the resource, so none of the data previously
172
+ * stored in the resource will be preserved.
173
+ *
174
+ * \param resource - Pointer to the returned object handle
175
+ * \param vdpSurface - VDPAU object to be registered
176
+ * \param flags - Map flags
177
+ *
178
+ * \return
179
+ * ::cudaSuccess,
180
+ * ::cudaErrorInvalidDevice,
181
+ * ::cudaErrorInvalidValue,
182
+ * ::cudaErrorInvalidResourceHandle,
183
+ * ::cudaErrorUnknown
184
+ * \notefnerr
185
+ *
186
+ * \sa
187
+ * ::cudaVDPAUSetVDPAUDevice,
188
+ * ::cudaGraphicsUnregisterResource,
189
+ * ::cudaGraphicsSubResourceGetMappedArray,
190
+ * ::cuGraphicsVDPAURegisterOutputSurface
191
+ */
192
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterOutputSurface(struct cudaGraphicsResource **resource, VdpOutputSurface vdpSurface, unsigned int flags);
193
+
194
+ /** @} */ /* END CUDART_VDPAU */
195
+
196
+ #if defined(__cplusplus)
197
+ }
198
+ #endif /* __cplusplus */
199
+
200
+ #endif /* __CUDA_VDPAU_INTEROP_H__ */
201
+
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h ADDED
@@ -0,0 +1,57 @@
1
+ /*
2
+ * Copyright 2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef __CUDART_PLATFORM_H__
51
+ #define __CUDART_PLATFORM_H__
52
+
53
+ #if ((defined(__linux__) || defined(__QNX__)) && (defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)))
54
+ #define isEglSupported 1
55
+ #endif
56
+
57
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h ADDED
@@ -0,0 +1,217 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__DEVICE_ATOMIC_FUNCTIONS_H__)
51
+ #define __DEVICE_ATOMIC_FUNCTIONS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #elif defined(_NVHPC_CUDA)
56
+ # define __DEVICE_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__
57
+ #else /* __CUDACC_RTC__ */
58
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
59
+ #endif /* __CUDACC_RTC__ */
60
+
61
+ #if defined(__cplusplus) && defined(__CUDACC__)
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ /* Add !defined(_NVHPC_CUDA) to avoid empty function definition in PGI CUDA
72
+ * C++ compiler where the macro __CUDA_ARCH__ is not defined. */
73
+ #if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
74
+ #define __DEF_IF_HOST { }
75
+ #else /* !__CUDA_ARCH__ */
76
+ #define __DEF_IF_HOST ;
77
+ #endif /* __CUDA_ARCH__ */
78
+
79
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
80
+ extern "C"
81
+ {
82
+ extern __device__ __device_builtin__ int __iAtomicAdd(int *address, int val);
83
+ extern __device__ __device_builtin__ unsigned int __uAtomicAdd(unsigned int *address, unsigned int val);
84
+ extern __device__ __device_builtin__ int __iAtomicExch(int *address, int val);
85
+ extern __device__ __device_builtin__ unsigned int __uAtomicExch(unsigned int *address, unsigned int val);
86
+ extern __device__ __device_builtin__ float __fAtomicExch(float *address, float val);
87
+ extern __device__ __device_builtin__ int __iAtomicMin(int *address, int val);
88
+ extern __device__ __device_builtin__ unsigned int __uAtomicMin(unsigned int *address, unsigned int val);
89
+ extern __device__ __device_builtin__ int __iAtomicMax(int *address, int val);
90
+ extern __device__ __device_builtin__ unsigned int __uAtomicMax(unsigned int *address, unsigned int val);
91
+ extern __device__ __device_builtin__ unsigned int __uAtomicInc(unsigned int *address, unsigned int val);
92
+ extern __device__ __device_builtin__ unsigned int __uAtomicDec(unsigned int *address, unsigned int val);
93
+ extern __device__ __device_builtin__ int __iAtomicAnd(int *address, int val);
94
+ extern __device__ __device_builtin__ unsigned int __uAtomicAnd(unsigned int *address, unsigned int val);
95
+ extern __device__ __device_builtin__ int __iAtomicOr(int *address, int val);
96
+ extern __device__ __device_builtin__ unsigned int __uAtomicOr(unsigned int *address, unsigned int val);
97
+ extern __device__ __device_builtin__ int __iAtomicXor(int *address, int val);
98
+ extern __device__ __device_builtin__ unsigned int __uAtomicXor(unsigned int *address, unsigned int val);
99
+ extern __device__ __device_builtin__ int __iAtomicCAS(int *address, int compare, int val);
100
+ extern __device__ __device_builtin__ unsigned int __uAtomicCAS(unsigned int *address, unsigned int compare, unsigned int val);
101
+ }
102
+ #endif /* __CUDA_ARCH__ || defined(_NVHPC_CUDA) */
103
+
104
+ /*******************************************************************************
105
+ * *
106
+ * *
107
+ * *
108
+ *******************************************************************************/
109
+
110
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val) __DEF_IF_HOST
111
+
112
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val) __DEF_IF_HOST
113
+
114
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val) __DEF_IF_HOST
115
+
116
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val) __DEF_IF_HOST
117
+
118
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val) __DEF_IF_HOST
119
+
120
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val) __DEF_IF_HOST
121
+
122
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val) __DEF_IF_HOST
123
+
124
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val) __DEF_IF_HOST
125
+
126
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val) __DEF_IF_HOST
127
+
128
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val) __DEF_IF_HOST
129
+
130
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val) __DEF_IF_HOST
131
+
132
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val) __DEF_IF_HOST
133
+
134
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val) __DEF_IF_HOST
135
+
136
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val) __DEF_IF_HOST
137
+
138
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val) __DEF_IF_HOST
139
+
140
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val) __DEF_IF_HOST
141
+
142
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val) __DEF_IF_HOST
143
+
144
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val) __DEF_IF_HOST
145
+
146
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val) __DEF_IF_HOST
147
+
148
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val) __DEF_IF_HOST
149
+
150
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val) __DEF_IF_HOST
151
+
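Two hypothetical kernels illustrating the 32-bit overloads declared above (a histogram via atomicAdd and a running maximum via atomicMax):

__global__ void histogramKernel(const unsigned char *in, int n, unsigned int *bins)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicAdd(&bins[in[i]], 1u);   // one shared 256-bin histogram in global memory
    }
}

__global__ void maxKernel(const int *in, int n, int *result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicMax(result, in[i]);      // result must be initialized by the host, e.g. to INT_MIN
    }
}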
152
+ /*******************************************************************************
153
+ * *
154
+ * *
155
+ * *
156
+ *******************************************************************************/
157
+
158
+ #include "cuda_runtime_api.h"
159
+
160
+ #if defined(_WIN32)
161
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
162
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
163
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
164
+ #else
165
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
166
+ #endif
167
+
168
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
169
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on compute_70 and above, and should be replaced with "#x"_sync()."\
170
+ "To continue using "#x"(), specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options: -arch=compute_60 -code=sm_70."
171
+ #elif defined(_NVHPC_CUDA)
172
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()."
173
+ #else
174
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)."
175
+ #endif
176
+
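A sketch of the migration the deprecation message above describes; the kernel is hypothetical and simply mirrors __any() with its _sync replacement:

__global__ void voteKernel(const int *flags, int *out)
{
    int pred = flags[threadIdx.x] != 0;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
    out[threadIdx.x] = __any_sync(__activemask(), pred);  // explicit mask of active lanes
#else
    out[threadIdx.x] = __any(pred);                       // legacy form, deprecated
#endif
}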
177
+ extern "C"
178
+ {
179
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
180
+ extern __device__ __device_builtin__ unsigned long long int __ullAtomicAdd(unsigned long long int *address, unsigned long long int val);
181
+ extern __device__ __device_builtin__ unsigned long long int __ullAtomicExch(unsigned long long int *address, unsigned long long int val);
182
+ extern __device__ __device_builtin__ unsigned long long int __ullAtomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val);
183
+ #endif /* __CUDA_ARCH__ || _NVHPC_CUDA */
184
+ extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) int __any(int cond);
185
+ extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) int __all(int cond);
186
+ }
187
+
188
+
189
+ /*******************************************************************************
190
+ * *
191
+ * *
192
+ * *
193
+ *******************************************************************************/
194
+
195
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST
196
+
197
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST
198
+
199
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val) __DEF_IF_HOST
200
+
201
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) bool any(bool cond) __DEF_IF_HOST
202
+
203
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) bool all(bool cond) __DEF_IF_HOST
204
+
205
+ #undef __DEPRECATED__
206
+ #undef __WSB_DEPRECATION_MESSAGE
207
+
208
+ #endif /* __cplusplus && __CUDACC__ */
209
+
210
+ #undef __DEF_IF_HOST
211
+ #undef __DEVICE_ATOMIC_FUNCTIONS_DECL__
212
+
213
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
214
+ #include "device_atomic_functions.hpp"
215
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
216
+
217
+ #endif /* !__DEVICE_ATOMIC_FUNCTIONS_H__ */
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp ADDED
@@ -0,0 +1,224 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__DEVICE_ATOMIC_FUNCTIONS_HPP__)
51
+ #define __DEVICE_ATOMIC_FUNCTIONS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #else /* __CUDACC_RTC__ */
56
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ /*******************************************************************************
62
+ * *
63
+ * *
64
+ * *
65
+ *******************************************************************************/
66
+
67
+ #include "cuda_runtime_api.h"
68
+
69
+ /*******************************************************************************
70
+ * *
71
+ * *
72
+ * *
73
+ *******************************************************************************/
74
+
75
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val)
76
+ {
77
+ return __iAtomicAdd(address, val);
78
+ }
79
+
80
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val)
81
+ {
82
+ return __uAtomicAdd(address, val);
83
+ }
84
+
85
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val)
86
+ {
87
+ return __iAtomicAdd(address, (unsigned int)-(int)val);
88
+ }
89
+
90
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val)
91
+ {
92
+ return __uAtomicAdd(address, (unsigned int)-(int)val);
93
+ }
94
+
95
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val)
96
+ {
97
+ return __iAtomicExch(address, val);
98
+ }
99
+
100
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val)
101
+ {
102
+ return __uAtomicExch(address, val);
103
+ }
104
+
105
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val)
106
+ {
107
+ return __fAtomicExch(address, val);
108
+ }
109
+
110
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val)
111
+ {
112
+ return __iAtomicMin(address, val);
113
+ }
114
+
115
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val)
116
+ {
117
+ return __uAtomicMin(address, val);
118
+ }
119
+
120
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val)
121
+ {
122
+ return __iAtomicMax(address, val);
123
+ }
124
+
125
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val)
126
+ {
127
+ return __uAtomicMax(address, val);
128
+ }
129
+
130
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val)
131
+ {
132
+ return __uAtomicInc(address, val);
133
+ }
134
+
135
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val)
136
+ {
137
+ return __uAtomicDec(address, val);
138
+ }
139
+
140
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val)
141
+ {
142
+ return __iAtomicAnd(address, val);
143
+ }
144
+
145
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val)
146
+ {
147
+ return __uAtomicAnd(address, val);
148
+ }
149
+
150
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val)
151
+ {
152
+ return __iAtomicOr(address, val);
153
+ }
154
+
155
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val)
156
+ {
157
+ return __uAtomicOr(address, val);
158
+ }
159
+
160
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val)
161
+ {
162
+ return __iAtomicXor(address, val);
163
+ }
164
+
165
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val)
166
+ {
167
+ return __uAtomicXor(address, val);
168
+ }
169
+
170
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val)
171
+ {
172
+ return __iAtomicCAS(address, compare, val);
173
+ }
174
+
175
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val)
176
+ {
177
+ return __uAtomicCAS(address, compare, val);
178
+ }
179
+
180
+ /*******************************************************************************
181
+ * *
182
+ * *
183
+ * *
184
+ *******************************************************************************/
185
+
186
+ #include "cuda_runtime_api.h"
187
+
188
+ /*******************************************************************************
189
+ * *
190
+ * *
191
+ * *
192
+ *******************************************************************************/
193
+
194
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val)
195
+ {
196
+ return __ullAtomicAdd(address, val);
197
+ }
198
+
199
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val)
200
+ {
201
+ return __ullAtomicExch(address, val);
202
+ }
203
+
204
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val)
205
+ {
206
+ return __ullAtomicCAS(address, compare, val);
207
+ }
208
+
209
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ bool any(bool cond)
210
+ {
211
+ return (bool)__any((int)cond);
212
+ }
213
+
214
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ bool all(bool cond)
215
+ {
216
+ return (bool)__all((int)cond);
217
+ }
218
+
219
+ #endif /* __cplusplus && __CUDACC__ */
220
+
221
+ #undef __DEVICE_ATOMIC_FUNCTIONS_DECL__
222
+
223
+ #endif /* !__DEVICE_ATOMIC_FUNCTIONS_HPP__ */
224
+
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h ADDED
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/device_double_functions.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__
65
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h ADDED
@@ -0,0 +1,65 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__
+ #endif
+
+ #include "crt/device_functions.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h ADDED
@@ -0,0 +1,118 @@
+ /*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__DEVICE_LAUNCH_PARAMETERS_H__)
+ #define __DEVICE_LAUNCH_PARAMETERS_H__
+
+ #include "vector_types.h"
+
+ #if !defined(__STORAGE__)
+
+ #if defined(__CUDACC_RTC__)
+ #define __STORAGE__ \
+ extern const __device__
+ #else /* !__CUDACC_RTC__ */
+ #define __STORAGE__ \
+ extern const
+ #endif /* __CUDACC_RTC__ */
+
+ #endif /* __STORAGE__ */
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif /* __cplusplus */
+
+ uint3 __device_builtin__ __STORAGE__ threadIdx;
+ uint3 __device_builtin__ __STORAGE__ blockIdx;
+ dim3 __device_builtin__ __STORAGE__ blockDim;
+ dim3 __device_builtin__ __STORAGE__ gridDim;
+ int __device_builtin__ __STORAGE__ warpSize;
+
+ #undef __STORAGE__
+
+ #if defined(__cplusplus)
+ }
+ #endif /* __cplusplus */
+
+ #if !defined(__cudaGet_threadIdx)
+
+ #define __cudaGet_threadIdx() \
+ threadIdx
+
+ #endif /* __cudaGet_threadIdx */
+
+ #if !defined(__cudaGet_blockIdx)
+
+ #define __cudaGet_blockIdx() \
+ blockIdx
+
+ #endif /* __cudaGet_blockIdx */
+
+ #if !defined(__cudaGet_blockDim)
+
+ #define __cudaGet_blockDim() \
+ blockDim
+
+ #endif /* __cudaGet_blockDim */
+
+ #if !defined(__cudaGet_gridDim)
+
+ #define __cudaGet_gridDim() \
+ gridDim
+
+ #endif /* __cudaGet_gridDim */
+
+ #if !defined(__cudaGet_warpSize)
+
+ #define __cudaGet_warpSize() \
+ warpSize
+
+ #endif /* __cudaGet_warpSize */
+
+ #endif /* !__DEVICE_LAUNCH_PARAMETERS_H__ */
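Note: device_launch_parameters.h is what makes the built-in launch variables (threadIdx, blockIdx, blockDim, gridDim, warpSize) visible to device code. A minimal kernel sketch using them (kernel and file names ours; uses managed memory purely for brevity):

/* vec_add.cu -- a small sketch using the built-ins declared above. */
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void vec_add(const float *a, const float *b, float *c, int n)
{
    /* Global index and grid-stride step come straight from the built-ins. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (; i < n; i += stride) {
        c[i] = a[i] + b[i];
    }
}

int main(void)
{
    const int n = 1 << 20;
    float *a, *b, *c;
    cudaMallocManaged(&a, n * sizeof(float));
    cudaMallocManaged(&b, n * sizeof(float));
    cudaMallocManaged(&c, n * sizeof(float));
    for (int i = 0; i < n; ++i) { a[i] = 1.0f; b[i] = 2.0f; }

    vec_add<<<256, 256>>>(a, b, c, n);
    cudaDeviceSynchronize();

    printf("c[0] = %f\n", c[0]);   /* expect 3.000000 */
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}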
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h ADDED
@@ -0,0 +1,81 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__DEVICE_TYPES_H__)
+ #define __DEVICE_TYPES_H__
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__
+ #endif
+
+ #ifndef __DOXYGEN_ONLY__
+ #include "crt/host_defines.h"
+ #endif
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ enum __device_builtin__ cudaRoundMode
+ {
+ cudaRoundNearest,
+ cudaRoundZero,
+ cudaRoundPosInf,
+ cudaRoundMinInf
+ };
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__
+ #endif
+
+ #endif /* !__DEVICE_TYPES_H__ */
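Note: cudaRoundMode only names the four IEEE-754 rounding directions used by conversion routines elsewhere in the toolkit. A hedged host-only sketch that maps the enum onto the C99 fenv rounding modes, purely to make the semantics concrete; the mapping and helper name are ours, not part of this header:

/* round_demo.c -- illustrative only; build with the CUDA include path,
 * e.g. cc -I"$CUDA_HOME/include" round_demo.c -lm. */
#include <cuda_runtime.h>   /* brings in device_types.h / cudaRoundMode */
#include <fenv.h>
#include <math.h>
#include <stdio.h>

static int to_fenv(enum cudaRoundMode m)
{
    switch (m) {
    case cudaRoundNearest: return FE_TONEAREST;
    case cudaRoundZero:    return FE_TOWARDZERO;
    case cudaRoundPosInf:  return FE_UPWARD;
    case cudaRoundMinInf:  return FE_DOWNWARD;
    }
    return FE_TONEAREST;
}

int main(void)
{
    fesetround(to_fenv(cudaRoundZero));
    printf("1.7 rounded toward zero: %.1f\n", rint(1.7));   /* prints 1.0 */
    return 0;
}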
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_functions.h ADDED
@@ -0,0 +1,145 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__DRIVER_FUNCTIONS_H__)
+ #define __DRIVER_FUNCTIONS_H__
+
+ #include "builtin_types.h"
+ #include "crt/host_defines.h"
+ #include "driver_types.h"
+
+ /**
+ * \addtogroup CUDART_MEMORY
+ *
+ * @{
+ */
+
+ /**
+ * \brief Returns a cudaPitchedPtr based on input parameters
+ *
+ * Returns a ::cudaPitchedPtr based on the specified input parameters \p d,
+ * \p p, \p xsz, and \p ysz.
+ *
+ * \param d - Pointer to allocated memory
+ * \param p - Pitch of allocated memory in bytes
+ * \param xsz - Logical width of allocation in elements
+ * \param ysz - Logical height of allocation in elements
+ *
+ * \return
+ * ::cudaPitchedPtr specified by \p d, \p p, \p xsz, and \p ysz
+ *
+ * \sa make_cudaExtent, make_cudaPos
+ */
+ static __inline__ __host__ struct cudaPitchedPtr make_cudaPitchedPtr(void *d, size_t p, size_t xsz, size_t ysz)
+ {
+ struct cudaPitchedPtr s;
+
+ s.ptr = d;
+ s.pitch = p;
+ s.xsize = xsz;
+ s.ysize = ysz;
+
+ return s;
+ }
+
+ /**
+ * \brief Returns a cudaPos based on input parameters
+ *
+ * Returns a ::cudaPos based on the specified input parameters \p x,
+ * \p y, and \p z.
+ *
+ * \param x - X position
+ * \param y - Y position
+ * \param z - Z position
+ *
+ * \return
+ * ::cudaPos specified by \p x, \p y, and \p z
+ *
+ * \sa make_cudaExtent, make_cudaPitchedPtr
+ */
+ static __inline__ __host__ struct cudaPos make_cudaPos(size_t x, size_t y, size_t z)
+ {
+ struct cudaPos p;
+
+ p.x = x;
+ p.y = y;
+ p.z = z;
+
+ return p;
+ }
+
+ /**
+ * \brief Returns a cudaExtent based on input parameters
+ *
+ * Returns a ::cudaExtent based on the specified input parameters \p w,
+ * \p h, and \p d.
+ *
+ * \param w - Width in elements when referring to array memory, in bytes when referring to linear memory
+ * \param h - Height in elements
+ * \param d - Depth in elements
+ *
+ * \return
+ * ::cudaExtent specified by \p w, \p h, and \p d
+ *
+ * \sa make_cudaPitchedPtr, make_cudaPos
+ */
+ static __inline__ __host__ struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d)
+ {
+ struct cudaExtent e;
+
+ e.width = w;
+ e.height = h;
+ e.depth = d;
+
+ return e;
+ }
+
+ /** @} */ /* END CUDART_MEMORY */
+
+ #endif /* !__DRIVER_FUNCTIONS_H__ */
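Note: the three constructors above are plain host helpers and are typically paired with the runtime's 3D allocation and memset/copy entry points. A small sketch (sizes arbitrary, file name ours):

/* pitched3d.cu -- a small sketch pairing the constructors above with the
 * 3D allocation/memset entry points. */
#include <cuda_runtime.h>
#include <stdio.h>

int main(void)
{
    /* A 64 x 32 x 8 volume of floats; width is given in bytes for linear memory. */
    struct cudaExtent extent = make_cudaExtent(64 * sizeof(float), 32, 8);

    struct cudaPitchedPtr vol;
    cudaMalloc3D(&vol, extent);      /* the runtime picks a row pitch >= width */
    cudaMemset3D(vol, 0, extent);    /* zero the whole (possibly padded) allocation */

    printf("requested width: %zu bytes, pitch chosen: %zu bytes\n",
           extent.width, vol.pitch);

    /* make_cudaPos / make_cudaPitchedPtr fill a cudaMemcpy3DParms structure
     * the same way when staging a cudaMemcpy3D. */
    cudaFree(vol.ptr);
    return 0;
}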
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h ADDED
@@ -0,0 +1,65 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
+ #endif
+
+ #include "crt/host_config.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h ADDED
@@ -0,0 +1,65 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__
+ #endif
+
+ #include "crt/host_defines.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h ADDED
@@ -0,0 +1,103 @@
+ /*
+ * Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__LIBRARY_TYPES_H__)
+ #define __LIBRARY_TYPES_H__
+
+
+
+ typedef enum cudaDataType_t
+ {
+ CUDA_R_16F = 2, /* real as a half */
+ CUDA_C_16F = 6, /* complex as a pair of half numbers */
+ CUDA_R_16BF = 14, /* real as a nv_bfloat16 */
+ CUDA_C_16BF = 15, /* complex as a pair of nv_bfloat16 numbers */
+ CUDA_R_32F = 0, /* real as a float */
+ CUDA_C_32F = 4, /* complex as a pair of float numbers */
+ CUDA_R_64F = 1, /* real as a double */
+ CUDA_C_64F = 5, /* complex as a pair of double numbers */
+ CUDA_R_4I = 16, /* real as a signed 4-bit int */
+ CUDA_C_4I = 17, /* complex as a pair of signed 4-bit int numbers */
+ CUDA_R_4U = 18, /* real as a unsigned 4-bit int */
+ CUDA_C_4U = 19, /* complex as a pair of unsigned 4-bit int numbers */
+ CUDA_R_8I = 3, /* real as a signed 8-bit int */
+ CUDA_C_8I = 7, /* complex as a pair of signed 8-bit int numbers */
+ CUDA_R_8U = 8, /* real as a unsigned 8-bit int */
+ CUDA_C_8U = 9, /* complex as a pair of unsigned 8-bit int numbers */
+ CUDA_R_16I = 20, /* real as a signed 16-bit int */
+ CUDA_C_16I = 21, /* complex as a pair of signed 16-bit int numbers */
+ CUDA_R_16U = 22, /* real as a unsigned 16-bit int */
+ CUDA_C_16U = 23, /* complex as a pair of unsigned 16-bit int numbers */
+ CUDA_R_32I = 10, /* real as a signed 32-bit int */
+ CUDA_C_32I = 11, /* complex as a pair of signed 32-bit int numbers */
+ CUDA_R_32U = 12, /* real as a unsigned 32-bit int */
+ CUDA_C_32U = 13, /* complex as a pair of unsigned 32-bit int numbers */
+ CUDA_R_64I = 24, /* real as a signed 64-bit int */
+ CUDA_C_64I = 25, /* complex as a pair of signed 64-bit int numbers */
+ CUDA_R_64U = 26, /* real as a unsigned 64-bit int */
+ CUDA_C_64U = 27, /* complex as a pair of unsigned 64-bit int numbers */
+ CUDA_R_8F_E4M3 = 28, /* real as a nv_fp8_e4m3 */
+ CUDA_R_8F_E5M2 = 29, /* real as a nv_fp8_e5m2 */
+ } cudaDataType;
+
+
+ typedef enum libraryPropertyType_t
+ {
+ MAJOR_VERSION,
+ MINOR_VERSION,
+ PATCH_LEVEL
+ } libraryPropertyType;
+
+
+ #ifndef __cplusplus
+ typedef enum cudaDataType_t cudaDataType_t;
+ typedef enum libraryPropertyType_t libraryPropertyType_t;
+ #endif
+
+ #endif /* !__LIBRARY_TYPES_H__ */
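Note: cudaDataType is the type tag that libraries such as cuBLAS and cuSPARSE accept to describe otherwise untyped device buffers. A hedged helper that maps a subset of the real-valued tags to their storage size, just to make the encoding concrete; the helper name and the subset covered are ours:

/* dtype_size.c -- illustrative only; build with the CUDA include path. */
#include <library_types.h>
#include <stddef.h>
#include <stdio.h>

static size_t element_size(cudaDataType t)
{
    switch (t) {
    case CUDA_R_8I:  case CUDA_R_8U:
    case CUDA_R_8F_E4M3: case CUDA_R_8F_E5M2:           return 1;
    case CUDA_R_16F: case CUDA_R_16BF:
    case CUDA_R_16I: case CUDA_R_16U:                   return 2;
    case CUDA_R_32F: case CUDA_R_32I: case CUDA_R_32U:  return 4;
    case CUDA_R_64F: case CUDA_R_64I: case CUDA_R_64U:  return 8;
    default:                                            return 0; /* complex and 4-bit tags omitted */
    }
}

int main(void)
{
    printf("CUDA_R_16BF element size: %zu byte(s)\n", element_size(CUDA_R_16BF));
    return 0;
}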
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_constants.h ADDED
@@ -0,0 +1,152 @@
+ /*
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__MATH_CONSTANTS_H__)
+ #define __MATH_CONSTANTS_H__
+
+ /* single precision constants */
+ #define CUDART_INF_F __int_as_float(0x7f800000U)
+ #define CUDART_NAN_F __int_as_float(0x7fffffffU)
+ #define CUDART_MIN_DENORM_F __int_as_float(0x00000001U)
+ #define CUDART_MAX_NORMAL_F __int_as_float(0x7f7fffffU)
+ #define CUDART_NEG_ZERO_F __int_as_float(0x80000000U)
+ #define CUDART_ZERO_F 0.0F
+ #define CUDART_ONE_F 1.0F
+ #define CUDART_SQRT_HALF_F 0.707106781F
+ #define CUDART_SQRT_HALF_HI_F 0.707106781F
+ #define CUDART_SQRT_HALF_LO_F 1.210161749e-08F
+ #define CUDART_SQRT_TWO_F 1.414213562F
+ #define CUDART_THIRD_F 0.333333333F
+ #define CUDART_PIO4_F 0.785398163F
+ #define CUDART_PIO2_F 1.570796327F
+ #define CUDART_3PIO4_F 2.356194490F
+ #define CUDART_2_OVER_PI_F 0.636619772F
+ #define CUDART_SQRT_2_OVER_PI_F 0.797884561F
+ #define CUDART_PI_F 3.141592654F
+ #define CUDART_L2E_F 1.442695041F
+ #define CUDART_L2T_F 3.321928094F
+ #define CUDART_LG2_F 0.301029996F
+ #define CUDART_LGE_F 0.434294482F
+ #define CUDART_LN2_F 0.693147181F
+ #define CUDART_LNT_F 2.302585093F
+ #define CUDART_LNPI_F 1.144729886F
+ #define CUDART_TWO_TO_M126_F 1.175494351e-38F
+ #define CUDART_TWO_TO_126_F 8.507059173e37F
+ #define CUDART_NORM_HUGE_F 3.402823466e38F
+ #define CUDART_TWO_TO_23_F 8388608.0F
+ #define CUDART_TWO_TO_24_F 16777216.0F
+ #define CUDART_TWO_TO_31_F 2147483648.0F
+ #define CUDART_TWO_TO_32_F 4294967296.0F
+ #define CUDART_REMQUO_BITS_F 3U
+ #define CUDART_REMQUO_MASK_F (~((~0U)<<CUDART_REMQUO_BITS_F))
+ #define CUDART_TRIG_PLOSS_F 105615.0F
+
+ /* double precision constants */
+ #define CUDART_INF __longlong_as_double(0x7ff0000000000000ULL)
+ #define CUDART_NAN __longlong_as_double(0xfff8000000000000ULL)
+ #define CUDART_NEG_ZERO __longlong_as_double(0x8000000000000000ULL)
+ #define CUDART_MIN_DENORM __longlong_as_double(0x0000000000000001ULL)
+ #define CUDART_ZERO 0.0
+ #define CUDART_ONE 1.0
+ #define CUDART_SQRT_TWO 1.4142135623730951e+0
+ #define CUDART_SQRT_HALF 7.0710678118654757e-1
+ #define CUDART_SQRT_HALF_HI 7.0710678118654757e-1
+ #define CUDART_SQRT_HALF_LO (-4.8336466567264567e-17)
+ #define CUDART_THIRD 3.3333333333333333e-1
+ #define CUDART_TWOTHIRD 6.6666666666666667e-1
+ #define CUDART_PIO4 7.8539816339744828e-1
+ #define CUDART_PIO4_HI 7.8539816339744828e-1
+ #define CUDART_PIO4_LO 3.0616169978683830e-17
+ #define CUDART_PIO2 1.5707963267948966e+0
+ #define CUDART_PIO2_HI 1.5707963267948966e+0
+ #define CUDART_PIO2_LO 6.1232339957367660e-17
+ #define CUDART_3PIO4 2.3561944901923448e+0
+ #define CUDART_2_OVER_PI 6.3661977236758138e-1
+ #define CUDART_PI 3.1415926535897931e+0
+ #define CUDART_PI_HI 3.1415926535897931e+0
+ #define CUDART_PI_LO 1.2246467991473532e-16
+ #define CUDART_SQRT_2PI 2.5066282746310007e+0
+ #define CUDART_SQRT_2PI_HI 2.5066282746310007e+0
+ #define CUDART_SQRT_2PI_LO (-1.8328579980459167e-16)
+ #define CUDART_SQRT_PIO2 1.2533141373155003e+0
+ #define CUDART_SQRT_PIO2_HI 1.2533141373155003e+0
+ #define CUDART_SQRT_PIO2_LO (-9.1642899902295834e-17)
+ #define CUDART_SQRT_2OPI 7.9788456080286536e-1
+ #define CUDART_L2E 1.4426950408889634e+0
+ #define CUDART_L2E_HI 1.4426950408889634e+0
+ #define CUDART_L2E_LO 2.0355273740931033e-17
+ #define CUDART_L2T 3.3219280948873622e+0
+ #define CUDART_LG2 3.0102999566398120e-1
+ #define CUDART_LG2_HI 3.0102999566398120e-1
+ #define CUDART_LG2_LO (-2.8037281277851704e-18)
+ #define CUDART_LGE 4.3429448190325182e-1
+ #define CUDART_LGE_HI 4.3429448190325182e-1
+ #define CUDART_LGE_LO 1.09831965021676510e-17
+ #define CUDART_LN2 6.9314718055994529e-1
+ #define CUDART_LN2_HI 6.9314718055994529e-1
+ #define CUDART_LN2_LO 2.3190468138462996e-17
+ #define CUDART_LNT 2.3025850929940459e+0
+ #define CUDART_LNT_HI 2.3025850929940459e+0
+ #define CUDART_LNT_LO (-2.1707562233822494e-16)
+ #define CUDART_LNPI 1.1447298858494002e+0
+ #define CUDART_LN2_X_1024 7.0978271289338397e+2
+ #define CUDART_LN2_X_1025 7.1047586007394398e+2
+ #define CUDART_LN2_X_1075 7.4513321910194122e+2
+ #define CUDART_LG2_X_1024 3.0825471555991675e+2
+ #define CUDART_LG2_X_1075 3.2360724533877976e+2
+ #define CUDART_TWO_TO_23 8388608.0
+ #define CUDART_TWO_TO_52 4503599627370496.0
+ #define CUDART_TWO_TO_53 9007199254740992.0
+ #define CUDART_TWO_TO_54 18014398509481984.0
+ #define CUDART_TWO_TO_M54 5.5511151231257827e-17
+ #define CUDART_TWO_TO_M1022 2.22507385850720140e-308
+ #define CUDART_TRIG_PLOSS 2147483648.0
+ #define CUDART_DBL2INT_CVT 6755399441055744.0
+
+ #endif /* !__MATH_CONSTANTS_H__ */
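Note: the CUDART_* macros expand to __int_as_float / __longlong_as_double device intrinsics (or plain literals), so they are intended for device code. A minimal kernel sketch using two of them (kernel and file names ours):

/* constants_demo.cu -- a small sketch; the CUDART_* macros are usable in
 * __global__/__device__ code because they expand to device intrinsics. */
#include <cuda_runtime.h>
#include <math_constants.h>
#include <stdio.h>

__global__ void circle_area(const float *radius, float *area, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float r = radius[i];
        /* Flag negative radii with the quiet-NaN constant. */
        area[i] = (r < 0.0f) ? CUDART_NAN_F : CUDART_PI_F * r * r;
    }
}

int main(void)
{
    float h_r[4] = { 1.0f, 2.0f, 0.5f, -1.0f }, h_a[4];
    float *d_r, *d_a;
    cudaMalloc(&d_r, sizeof(h_r));
    cudaMalloc(&d_a, sizeof(h_a));
    cudaMemcpy(d_r, h_r, sizeof(h_r), cudaMemcpyHostToDevice);
    circle_area<<<1, 4>>>(d_r, d_a, 4);
    cudaMemcpy(h_a, d_a, sizeof(h_a), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 4; ++i) printf("area[%d] = %f\n", i, h_a[i]);
    cudaFree(d_r); cudaFree(d_a);
    return 0;
}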
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h ADDED
@@ -0,0 +1,60 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__
+ #endif
+
+ #include "crt/mma.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__
+ #endif
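Note: mma.h itself only forwards to crt/mma.h, which declares the nvcuda::wmma tensor-core fragment API. A minimal single-warp 16x16x16 FP16 sketch against that API (kernel and variable names ours; requires sm_70 or newer):

/* wmma_demo.cu -- a minimal single-warp sketch of the nvcuda::wmma API
 * reached through mma.h; compile for sm_70+, e.g. nvcc -arch=sm_70. */
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <mma.h>
#include <cstdio>

using namespace nvcuda;

__global__ void wmma_16x16x16(const half *a, const half *b, float *c)
{
    /* One warp computes one 16x16 tile: C = A * B (A row-major, B col-major). */
    wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
    wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

    wmma::fill_fragment(c_frag, 0.0f);
    wmma::load_matrix_sync(a_frag, a, 16);   /* leading dimension 16 */
    wmma::load_matrix_sync(b_frag, b, 16);
    wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
    wmma::store_matrix_sync(c, c_frag, 16, wmma::mem_row_major);
}

int main()
{
    half  *d_a, *d_b;
    float *d_c;
    cudaMalloc(&d_a, 16 * 16 * sizeof(half));
    cudaMalloc(&d_b, 16 * 16 * sizeof(half));
    cudaMalloc(&d_c, 16 * 16 * sizeof(float));
    cudaMemset(d_a, 0, 16 * 16 * sizeof(half));
    cudaMemset(d_b, 0, 16 * 16 * sizeof(half));

    wmma_16x16x16<<<1, 32>>>(d_a, d_b, d_c);   /* exactly one warp */
    cudaDeviceSynchronize();
    std::printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}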