applied-ai-018 committed on
Commit 1112d0f · verified · 1 Parent(s): da80b95

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py +0 -0
  4. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h +64 -0
  6. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h +588 -0
  7. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h +65 -0
  8. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h +452 -0
  9. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h +108 -0
  10. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h +174 -0
  11. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h +99 -0
  12. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h +212 -0
  13. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h +634 -0
  14. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h +338 -0
  15. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h +189 -0
  16. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h +135 -0
  17. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h +133 -0
  18. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h +429 -0
  19. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/scan.h +320 -0
  20. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/sync.h +267 -0
  21. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h +62 -0
  22. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/reduce.h +63 -0
  23. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/scan.h +63 -0
  24. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h +659 -0
  25. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h +96 -0
  26. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h +608 -0
  27. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h +123 -0
  28. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h +78 -0
  29. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h +0 -0
  30. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h +282 -0
  31. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h +90 -0
  32. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h +280 -0
  33. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h +365 -0
  34. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.h +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp +0 -0
  36. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h +735 -0
  37. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h +642 -0
  38. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h +0 -0
  39. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp +0 -0
  40. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h +367 -0
  41. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h +514 -0
  42. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h +1958 -0
  43. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h +0 -0
  44. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h +76 -0
  45. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h +76 -0
  46. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h +201 -0
  47. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h +57 -0
  48. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h +217 -0
  49. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp +224 -0
  50. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h +65 -0
.gitattributes CHANGED
@@ -98,3 +98,4 @@ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.
  llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
+ llmeval-env/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h ADDED
@@ -0,0 +1,64 @@
+ /*
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "device_types.h"
+ #if !defined(__CUDACC_RTC__)
+ #define EXCLUDE_FROM_RTC
+ #include "driver_types.h"
+ #undef EXCLUDE_FROM_RTC
+ #endif /* !__CUDACC_RTC__ */
+ #include "surface_types.h"
+ #include "texture_types.h"
+ #include "vector_types.h"
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h ADDED
@@ -0,0 +1,588 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CHANNEL_DESCRIPTOR_H__)
51
+ #define __CHANNEL_DESCRIPTOR_H__
52
+
53
+ #if defined(__cplusplus)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #include "cuda_runtime_api.h"
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ /**
70
+ * \addtogroup CUDART_HIGHLEVEL
71
+ *
72
+ * @{
73
+ */
74
+
75
+ /**
76
+ * \brief \hl Returns a channel descriptor using the specified format
77
+ *
78
+ * Returns a channel descriptor with format \p f and number of bits of each
79
+ * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is
80
+ * defined as:
81
+ * \code
82
+ struct cudaChannelFormatDesc {
83
+ int x, y, z, w;
84
+ enum cudaChannelFormatKind f;
85
+ };
86
+ * \endcode
87
+ *
88
+ * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,
89
+ * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat,
90
+ * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2,
91
+ * ::cudaChannelFormatKindSignedNormalized8X4,
92
+ * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2,
93
+ * ::cudaChannelFormatKindUnsignedNormalized8X4,
94
+ * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2,
95
+ * ::cudaChannelFormatKindSignedNormalized16X4,
96
+ * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2,
97
+ * ::cudaChannelFormatKindUnsignedNormalized16X4
98
+ * or ::cudaChannelFormatKindNV12.
99
+ *
100
+ * The format is specified by the template specialization.
101
+ *
102
+ * The template function specializes for the following scalar types:
103
+ * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float.
104
+ * The template function specializes for the following vector types:
105
+ * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}.
106
+ * The template function specializes for following cudaChannelFormatKind enum values:
107
+ * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12.
108
+ *
109
+ * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone
110
+ *
111
+ * \return
112
+ * Channel descriptor with format \p f
113
+ *
114
+ * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)",
115
+ * ::cudaGetChannelDesc,
116
+ */
117
+ template<class T> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
118
+ {
119
+ return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
120
+ }
121
+
122
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void)
123
+ {
124
+ int e = (int)sizeof(unsigned short) * 8;
125
+
126
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
127
+ }
128
+
129
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void)
130
+ {
131
+ int e = (int)sizeof(unsigned short) * 8;
132
+
133
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
134
+ }
135
+
136
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void)
137
+ {
138
+ int e = (int)sizeof(unsigned short) * 8;
139
+
140
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
141
+ }
142
+
143
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void)
144
+ {
145
+ int e = (int)sizeof(unsigned short) * 8;
146
+
147
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
148
+ }
149
+
150
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char>(void)
151
+ {
152
+ int e = (int)sizeof(char) * 8;
153
+
154
+ #if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__)
155
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
156
+ #else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
157
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
158
+ #endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
159
+ }
160
+
161
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<signed char>(void)
162
+ {
163
+ int e = (int)sizeof(signed char) * 8;
164
+
165
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
166
+ }
167
+
168
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned char>(void)
169
+ {
170
+ int e = (int)sizeof(unsigned char) * 8;
171
+
172
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
173
+ }
174
+
175
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char1>(void)
176
+ {
177
+ int e = (int)sizeof(signed char) * 8;
178
+
179
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
180
+ }
181
+
182
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar1>(void)
183
+ {
184
+ int e = (int)sizeof(unsigned char) * 8;
185
+
186
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
187
+ }
188
+
189
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char2>(void)
190
+ {
191
+ int e = (int)sizeof(signed char) * 8;
192
+
193
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
194
+ }
195
+
196
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar2>(void)
197
+ {
198
+ int e = (int)sizeof(unsigned char) * 8;
199
+
200
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
201
+ }
202
+
203
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char4>(void)
204
+ {
205
+ int e = (int)sizeof(signed char) * 8;
206
+
207
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
208
+ }
209
+
210
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar4>(void)
211
+ {
212
+ int e = (int)sizeof(unsigned char) * 8;
213
+
214
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
215
+ }
216
+
217
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short>(void)
218
+ {
219
+ int e = (int)sizeof(short) * 8;
220
+
221
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
222
+ }
223
+
224
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned short>(void)
225
+ {
226
+ int e = (int)sizeof(unsigned short) * 8;
227
+
228
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
229
+ }
230
+
231
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short1>(void)
232
+ {
233
+ int e = (int)sizeof(short) * 8;
234
+
235
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
236
+ }
237
+
238
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort1>(void)
239
+ {
240
+ int e = (int)sizeof(unsigned short) * 8;
241
+
242
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
243
+ }
244
+
245
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short2>(void)
246
+ {
247
+ int e = (int)sizeof(short) * 8;
248
+
249
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
250
+ }
251
+
252
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort2>(void)
253
+ {
254
+ int e = (int)sizeof(unsigned short) * 8;
255
+
256
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
257
+ }
258
+
259
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short4>(void)
260
+ {
261
+ int e = (int)sizeof(short) * 8;
262
+
263
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
264
+ }
265
+
266
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort4>(void)
267
+ {
268
+ int e = (int)sizeof(unsigned short) * 8;
269
+
270
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
271
+ }
272
+
273
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int>(void)
274
+ {
275
+ int e = (int)sizeof(int) * 8;
276
+
277
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
278
+ }
279
+
280
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned int>(void)
281
+ {
282
+ int e = (int)sizeof(unsigned int) * 8;
283
+
284
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
285
+ }
286
+
287
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int1>(void)
288
+ {
289
+ int e = (int)sizeof(int) * 8;
290
+
291
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
292
+ }
293
+
294
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint1>(void)
295
+ {
296
+ int e = (int)sizeof(unsigned int) * 8;
297
+
298
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
299
+ }
300
+
301
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int2>(void)
302
+ {
303
+ int e = (int)sizeof(int) * 8;
304
+
305
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
306
+ }
307
+
308
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint2>(void)
309
+ {
310
+ int e = (int)sizeof(unsigned int) * 8;
311
+
312
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
313
+ }
314
+
315
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int4>(void)
316
+ {
317
+ int e = (int)sizeof(int) * 8;
318
+
319
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
320
+ }
321
+
322
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint4>(void)
323
+ {
324
+ int e = (int)sizeof(unsigned int) * 8;
325
+
326
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
327
+ }
328
+
329
+ #if !defined(__LP64__)
330
+
331
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long>(void)
332
+ {
333
+ int e = (int)sizeof(long) * 8;
334
+
335
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
336
+ }
337
+
338
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned long>(void)
339
+ {
340
+ int e = (int)sizeof(unsigned long) * 8;
341
+
342
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
343
+ }
344
+
345
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long1>(void)
346
+ {
347
+ int e = (int)sizeof(long) * 8;
348
+
349
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
350
+ }
351
+
352
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong1>(void)
353
+ {
354
+ int e = (int)sizeof(unsigned long) * 8;
355
+
356
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
357
+ }
358
+
359
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long2>(void)
360
+ {
361
+ int e = (int)sizeof(long) * 8;
362
+
363
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
364
+ }
365
+
366
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong2>(void)
367
+ {
368
+ int e = (int)sizeof(unsigned long) * 8;
369
+
370
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
371
+ }
372
+
373
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long4>(void)
374
+ {
375
+ int e = (int)sizeof(long) * 8;
376
+
377
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
378
+ }
379
+
380
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong4>(void)
381
+ {
382
+ int e = (int)sizeof(unsigned long) * 8;
383
+
384
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
385
+ }
386
+
387
+ #endif /* !__LP64__ */
388
+
389
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float>(void)
390
+ {
391
+ int e = (int)sizeof(float) * 8;
392
+
393
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
394
+ }
395
+
396
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float1>(void)
397
+ {
398
+ int e = (int)sizeof(float) * 8;
399
+
400
+ return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
401
+ }
402
+
403
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float2>(void)
404
+ {
405
+ int e = (int)sizeof(float) * 8;
406
+
407
+ return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
408
+ }
409
+
410
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float4>(void)
411
+ {
412
+ int e = (int)sizeof(float) * 8;
413
+
414
+ return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
415
+ }
416
+
417
+ static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void)
418
+ {
419
+ int e = (int)sizeof(char) * 8;
420
+
421
+ return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12);
422
+ }
423
+
424
+ template<cudaChannelFormatKind> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
425
+ {
426
+ return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
427
+ }
428
+
429
+ /* Signed 8-bit normalized integer formats */
430
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X1>(void)
431
+ {
432
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1);
433
+ }
434
+
435
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X2>(void)
436
+ {
437
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2);
438
+ }
439
+
440
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X4>(void)
441
+ {
442
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4);
443
+ }
444
+
445
+ /* Unsigned 8-bit normalized integer formats */
446
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X1>(void)
447
+ {
448
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized8X1);
449
+ }
450
+
451
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X2>(void)
452
+ {
453
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2);
454
+ }
455
+
456
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X4>(void)
457
+ {
458
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4);
459
+ }
460
+
461
+ /* Signed 16-bit normalized integer formats */
462
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X1>(void)
463
+ {
464
+ return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1);
465
+ }
466
+
467
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X2>(void)
468
+ {
469
+ return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2);
470
+ }
471
+
472
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X4>(void)
473
+ {
474
+ return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4);
475
+ }
476
+
477
+ /* Unsigned 16-bit normalized integer formats */
478
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X1>(void)
479
+ {
480
+ return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1);
481
+ }
482
+
483
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X2>(void)
484
+ {
485
+ return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2);
486
+ }
487
+
488
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X4>(void)
489
+ {
490
+ return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4);
491
+ }
492
+
493
+ /* NV12 format */
494
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindNV12>(void)
495
+ {
496
+ return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12);
497
+ }
498
+
499
+ /* BC1 format */
500
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1>(void)
501
+ {
502
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1);
503
+ }
504
+
505
+ /* BC1sRGB format */
506
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1SRGB>(void)
507
+ {
508
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB);
509
+ }
510
+
511
+ /* BC2 format */
512
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2>(void)
513
+ {
514
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2);
515
+ }
516
+
517
+ /* BC2sRGB format */
518
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2SRGB>(void)
519
+ {
520
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB);
521
+ }
522
+
523
+ /* BC3 format */
524
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3>(void)
525
+ {
526
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3);
527
+ }
528
+
529
+ /* BC3sRGB format */
530
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3SRGB>(void)
531
+ {
532
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB);
533
+ }
534
+
535
+ /* BC4 unsigned format */
536
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed4>(void)
537
+ {
538
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4);
539
+ }
540
+
541
+ /* BC4 signed format */
542
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed4>(void)
543
+ {
544
+ return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4);
545
+ }
546
+
547
+ /* BC5 unsigned format */
548
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed5>(void)
549
+ {
550
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed5);
551
+ }
552
+
553
+ /* BC5 signed format */
554
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed5>(void)
555
+ {
556
+ return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5);
557
+ }
558
+
559
+ /* BC6H unsigned format */
560
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed6H>(void)
561
+ {
562
+ return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H);
563
+ }
564
+
565
+ /* BC6H signed format */
566
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed6H>(void)
567
+ {
568
+ return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H);
569
+ }
570
+
571
+ /* BC7 format */
572
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7>(void)
573
+ {
574
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7);
575
+ }
576
+
577
+ /* BC7sRGB format */
578
+ template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7SRGB>(void)
579
+ {
580
+ return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB);
581
+ }
582
+
583
+ #endif /* __cplusplus */
584
+
585
+ /** @} */
586
+ /** @} */ /* END CUDART_TEXTURE_HL */
587
+
588
+ #endif /* !__CHANNEL_DESCRIPTOR_H__ */
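
The header above boils down to per-type specializations of cudaCreateChannelDesc<T>(). A minimal host-side sketch of typical usage (assumptions: nvcc with a recent CUDA runtime; the 256x256 array dimensions are illustrative, not from the commit):

    #include <cuda_runtime.h>
    #include <cstdio>

    int main() {
        // Channel descriptor for a 4-component 32-bit float element,
        // produced by the float4 specialization in channel_descriptor.h.
        cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();

        // Back a 256x256 CUDA array with that element format.
        cudaArray_t arr = nullptr;
        cudaError_t err = cudaMallocArray(&arr, &desc, 256, 256);
        if (err != cudaSuccess) {
            std::printf("cudaMallocArray failed: %s\n", cudaGetErrorString(err));
            return 1;
        }
        std::printf("channel bits: x=%d y=%d z=%d w=%d\n", desc.x, desc.y, desc.z, desc.w);
        cudaFreeArray(arr);
        return 0;
    }

The float4 specialization yields a {32, 32, 32, 32, cudaChannelFormatKindFloat} descriptor, which cudaMallocArray uses to size each array element.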
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h ADDED
@@ -0,0 +1,65 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
+ #endif
+
+ #include "crt/common_functions.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
+ #endif
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h ADDED
@@ -0,0 +1,452 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_ASYNC_H
50
+ #define _CG_ASYNC_H
51
+
52
+ #include "helpers.h"
53
+ #include "info.h"
54
+
55
+ #include <cuda_pipeline.h>
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+ // Groups supported by memcpy_async
61
+ template <class TyGroup>
62
+ struct _async_copy_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _async_copy_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>>
66
+ : public _CG_STL_NAMESPACE::true_type {};
67
+ template <>
68
+ struct _async_copy_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
69
+ template <>
70
+ struct _async_copy_group_supported<cooperative_groups::thread_block> : public _CG_STL_NAMESPACE::true_type {};
71
+
72
+ template <class TyGroup>
73
+ using async_copy_group_supported = _async_copy_group_supported<details::remove_qual<TyGroup>>;
74
+
75
+ // Groups that require optimization
76
+ template <class TyGroup>
77
+ struct _async_copy_optimize_tile : public _CG_STL_NAMESPACE::false_type {};
78
+
79
+ template <typename TyPar>
80
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<1, TyPar>>
81
+ : public _CG_STL_NAMESPACE::false_type {};
82
+
83
+ template <unsigned int Sz, typename TyPar>
84
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<Sz, TyPar>>
85
+ : public _CG_STL_NAMESPACE::true_type {};
86
+
87
+ template <class TyGroup>
88
+ using async_copy_optimize_tile = _async_copy_optimize_tile<details::remove_qual<TyGroup>>;
89
+
90
+ // SFINAE helpers for tile optimizations
91
+ template <class TyGroup>
92
+ using enable_tile_optimization =
93
+ typename _CG_STL_NAMESPACE::enable_if<async_copy_optimize_tile<TyGroup>::value, void *>::type;
94
+
95
+ template <class TyGroup>
96
+ using disable_tile_optimization =
97
+ typename _CG_STL_NAMESPACE::enable_if<!async_copy_optimize_tile<TyGroup>::value, void *>::type;
98
+
99
+ // Segment for punning to aligned types
100
+ template <unsigned int N>
101
+ struct _Segment {
102
+ int _seg[N];
103
+ };
104
+
105
+ // Trivial layout guaranteed-aligned copy-async compatible segments
106
+ template <unsigned int N>
107
+ struct Segment;
108
+ template <>
109
+ struct __align__(4) Segment<1> : public _Segment<1>{};
110
+ template <>
111
+ struct __align__(8) Segment<2> : public _Segment<2>{};
112
+ template <>
113
+ struct __align__(16) Segment<4> : public _Segment<4>{};
114
+
115
+ // Interleaved element by element copies from source to dest
116
+ template <typename TyGroup, typename TyElem>
117
+ _CG_STATIC_QUALIFIER void inline_copy(TyGroup &group, TyElem *__restrict__ dst, const TyElem *__restrict__ src,
118
+ size_t count) {
119
+ const unsigned int rank = group.thread_rank();
120
+ const unsigned int stride = group.size();
121
+
122
+ for (size_t idx = rank; idx < count; idx += stride) {
123
+ dst[idx] = src[idx];
124
+ }
125
+ }
126
+
127
+ template <typename TyGroup, typename TyElem, enable_tile_optimization<TyGroup> = nullptr>
128
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
129
+ const TyElem *__restrict__ src, size_t count) {
130
+ static_assert(async_copy_group_supported<TyGroup>::value,
131
+ "Async copy is only supported for groups that represent private shared memory");
132
+
133
+ if (count == 0) {
134
+ return;
135
+ }
136
+
137
+ const bool dstIsNotShared = !__isShared(dst);
138
+ const bool srcIsNotGlobal = !__isGlobal(src);
139
+
140
+ if (dstIsNotShared || srcIsNotGlobal) {
141
+ inline_copy(group, dst, src, count);
142
+ return;
143
+ }
144
+
145
+ const unsigned int stride = group.size();
146
+ const unsigned int rank = group.thread_rank();
147
+ // Efficient copies require warps to operate on the same amount of work at each step.
148
+ // remainders are handled in a separate stage to prevent branching
149
+ const unsigned int subWarpMask = (stride - 1);
150
+ const unsigned int subwarpCopies = (subWarpMask & (unsigned int)count);
151
+ const unsigned int maxSubwarpRank = min(rank, subwarpCopies - 1);
152
+
153
+ const size_t warpCopies = (count & (~subWarpMask));
154
+
155
+ for (size_t idx = 0; idx < warpCopies; idx += stride) {
156
+ size_t _srcIdx = rank + idx;
157
+ size_t _dstIdx = rank + idx;
158
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
159
+ }
160
+
161
+ if (subwarpCopies) {
162
+ size_t _srcIdx = warpCopies + maxSubwarpRank;
163
+ size_t _dstIdx = warpCopies + maxSubwarpRank;
164
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
165
+ }
166
+ }
167
+
168
+ template <typename TyGroup, typename TyElem, disable_tile_optimization<TyGroup> = nullptr>
169
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
170
+ const TyElem *__restrict__ src, size_t count) {
171
+ static_assert(async_copy_group_supported<TyGroup>::value,
172
+ "Async copy is only supported for groups that represent private shared memory");
173
+
174
+ const bool dstIsNotShared = !__isShared(dst);
175
+ const bool srcIsNotGlobal = !__isGlobal(src);
176
+
177
+ if (dstIsNotShared || srcIsNotGlobal) {
178
+ inline_copy(group, dst, src, count);
179
+ return;
180
+ }
181
+
182
+ unsigned int stride = group.size();
183
+ unsigned int rank = group.thread_rank();
184
+
185
+ for (size_t idx = rank; idx < count; idx += stride) {
186
+ size_t _srcIdx = idx;
187
+ size_t _dstIdx = idx;
188
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
189
+ }
190
+ }
191
+
192
+ // Determine best possible alignment given an input and initial conditions
193
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
194
+ template <unsigned int MinAlignment, unsigned int MaxAlignment>
195
+ _CG_STATIC_QUALIFIER uint32_t find_best_alignment(void *__restrict__ dst, const void *__restrict__ src) {
196
+ // Narrowing conversion intentional
197
+ uint32_t base1 = (uint32_t) reinterpret_cast<uintptr_t>(src);
198
+ uint32_t base2 = (uint32_t) reinterpret_cast<uintptr_t>(dst);
199
+
200
+ uint32_t diff = ((base1) ^ (base2)) & (MaxAlignment - 1);
201
+
202
+ // range [MaxAlignment, alignof(elem)], step: x >> 1
203
+ // over range of possible alignments, choose best available out of range
204
+ uint32_t out = MaxAlignment;
205
+ #pragma unroll
206
+ for (uint32_t alignment = (MaxAlignment >> 1); alignment >= MinAlignment; alignment >>= 1) {
207
+ if (alignment & diff)
208
+ out = alignment;
209
+ }
210
+
211
+ return out;
212
+ }
213
+
214
+ // Determine best possible alignment given an input and initial conditions
215
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
216
+ template <typename TyType, typename TyGroup>
217
+ _CG_STATIC_QUALIFIER void copy_like(const TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
218
+ size_t count) {
219
+ const char *src = reinterpret_cast<const char *>(_src);
220
+ char *dst = reinterpret_cast<char *>(_dst);
221
+
222
+ constexpr uint32_t targetAlignment = (uint32_t)alignof(TyType);
223
+
224
+ uint32_t base = (uint32_t) reinterpret_cast<uintptr_t>(src);
225
+ uint32_t alignOffset = ((~base) + 1) & (targetAlignment - 1);
226
+
227
+ inline_copy(group, dst, src, alignOffset);
228
+ count -= alignOffset;
229
+ src += alignOffset;
230
+ dst += alignOffset;
231
+
232
+ // Copy using the best available alignment, async_copy expects n-datums, not bytes
233
+ size_t asyncCount = count / sizeof(TyType);
234
+ accelerated_async_copy(group, reinterpret_cast<TyType *>(dst), reinterpret_cast<const TyType *>(src), asyncCount);
235
+ asyncCount *= sizeof(TyType);
236
+
237
+ count -= asyncCount;
238
+ src += asyncCount;
239
+ dst += asyncCount;
240
+ inline_copy(group, dst, src, count);
241
+ }
242
+
243
+ // We must determine alignment and manually align src/dst ourselves
244
+ template <size_t AlignHint>
245
+ struct _memcpy_async_align_dispatch {
246
+ template <typename TyGroup>
247
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ dst, const void *__restrict__ src, size_t count) {
248
+ uint32_t alignment = find_best_alignment<AlignHint, 16>(dst, src);
249
+
250
+ // Avoid copying the extra bytes if desired copy count is smaller
251
+ alignment = count < alignment ? AlignHint : alignment;
252
+
253
+ switch (alignment) {
254
+ default:
255
+ case 1:
256
+ inline_copy(group, reinterpret_cast<char *>(dst), reinterpret_cast<const char *>(src), count);
257
+ break;
258
+ case 2:
259
+ inline_copy(group, reinterpret_cast<short *>(dst), reinterpret_cast<const short *>(src), count >> 1);
260
+ break;
261
+ case 4:
262
+ copy_like<Segment<1>>(group, dst, src, count);
263
+ break;
264
+ case 8:
265
+ copy_like<Segment<2>>(group, dst, src, count);
266
+ break;
267
+ case 16:
268
+ copy_like<Segment<4>>(group, dst, src, count);
269
+ break;
270
+ }
271
+ }
272
+ };
273
+
274
+ // Specialization for 4 byte alignments
275
+ template <>
276
+ struct _memcpy_async_align_dispatch<4> {
277
+ template <typename TyGroup>
278
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
279
+ size_t count) {
280
+ const Segment<1> *src = reinterpret_cast<const Segment<1> *>(_src);
281
+ Segment<1> *dst = reinterpret_cast<Segment<1> *>(_dst);
282
+
283
+ // Dispatch straight to aligned LDGSTS calls
284
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
285
+ }
286
+ };
287
+
288
+ // Specialization for 8 byte alignments
289
+ template <>
290
+ struct _memcpy_async_align_dispatch<8> {
291
+ template <typename TyGroup>
292
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
293
+ size_t count) {
294
+ const Segment<2> *src = reinterpret_cast<const Segment<2> *>(_src);
295
+ Segment<2> *dst = reinterpret_cast<Segment<2> *>(_dst);
296
+
297
+ // Dispatch straight to aligned LDGSTS calls
298
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
299
+ }
300
+ };
301
+
302
+ // Alignments over 16 are truncated to 16 and bypass alignment
303
+ // This is the highest performing memcpy available
304
+ template <>
305
+ struct _memcpy_async_align_dispatch<16> {
306
+ template <typename TyGroup>
307
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
308
+ size_t count) {
309
+ const Segment<4> *src = reinterpret_cast<const Segment<4> *>(_src);
310
+ Segment<4> *dst = reinterpret_cast<Segment<4> *>(_dst);
311
+
312
+ // Dispatch straight to aligned LDGSTS calls
313
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
314
+ }
315
+ };
316
+
317
+ // byte-wide API
318
+ template <size_t Alignment, class TyGroup>
319
+ _CG_STATIC_QUALIFIER void _memcpy_async_dispatch_to_aligned_copy(const TyGroup &group, void *__restrict__ _dst,
320
+ const void *__restrict__ _src, size_t count) {
321
+ static_assert(!(Alignment & (Alignment - 1)), "Known static alignment dispatch must be a power of 2");
322
+ details::_memcpy_async_align_dispatch<Alignment>::copy(group, _dst, _src, count);
323
+ }
324
+
325
+ // Internal dispatch APIs
326
+ // These deduce the alignments and sizes necessary to invoke the underlying copy engine
327
+ template <typename Ty>
328
+ using is_void = _CG_STL_NAMESPACE::is_same<Ty, void>;
329
+
330
+ template <typename Ty>
331
+ using enable_if_not_void = typename _CG_STL_NAMESPACE::enable_if<!is_void<Ty>::value, void *>::type;
332
+
333
+ template <typename Ty>
334
+ using enable_if_void = typename _CG_STL_NAMESPACE::enable_if<is_void<Ty>::value, void *>::type;
335
+
336
+ template <typename Ty>
337
+ using enable_if_integral =
338
+ typename _CG_STL_NAMESPACE::enable_if<_CG_STL_NAMESPACE::is_integral<Ty>::value, void *>::type;
339
+
340
+ // byte-wide API using aligned_sized_t
341
+ template <class TyGroup, template <size_t> typename Alignment, size_t Hint>
342
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, void *__restrict__ _dst,
343
+ const void *__restrict__ _src, const Alignment<Hint> &count) {
344
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
345
+
346
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, (size_t)count);
347
+ }
348
+
349
+ // byte-wide API using type for aligment
350
+ template <class TyGroup, typename TyElem, typename TySize, size_t Hint = alignof(TyElem),
351
+ enable_if_not_void<TyElem> = nullptr, enable_if_integral<TySize> = nullptr>
352
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
353
+ const TyElem *__restrict__ _src, const TySize& count) {
354
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
355
+
356
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, count);
357
+ }
358
+
359
+ // byte-wide API with full alignment deduction required
360
+ template <class TyGroup, typename TyElem, typename TySize, enable_if_void<TyElem> = nullptr,
361
+ enable_if_integral<TySize> = nullptr>
362
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
363
+ const TyElem *__restrict__ _src, const TySize& count) {
364
+ details::_memcpy_async_dispatch_to_aligned_copy<1>(group, _dst, _src, count);
365
+ }
366
+
367
+ // 1d-datum API
368
+ template <class TyGroup, typename TyElem, size_t Hint = alignof(TyElem)>
369
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const size_t dstCount,
370
+ const TyElem *__restrict__ src, const size_t srcCount) {
371
+ constexpr unsigned int _align = Hint;
372
+ const size_t totalCount = min(dstCount, srcCount) * sizeof(TyElem);
373
+
374
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
375
+ }
376
+
377
+ // 1d-datum API using aligned_size_t
378
+ template <class TyGroup, typename TyElem, template <size_t> typename Alignment, size_t Hint>
379
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const Alignment<Hint> &dstCount,
380
+ const TyElem *__restrict__ src, const Alignment<Hint> &srcCount) {
381
+ constexpr unsigned int _align = Hint;
382
+ const size_t totalCount = min((size_t)dstCount, (size_t)srcCount) * sizeof(TyElem);
383
+
384
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
385
+ }
386
+
387
+ } // namespace details
388
+
389
+ /*
390
+ * Group submit batch of async-copy to cover contiguous 1D array
391
+ * and commit that batch to eventually wait for completion.
392
+ */
393
+ template <class TyGroup, typename TyElem, typename TySizeT>
394
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ _dst, const TyElem *__restrict__ _src,
395
+ const TySizeT &count) {
396
+ details::_memcpy_async_bytes(group, _dst, _src, count);
397
+ __pipeline_commit();
398
+ }
399
+
400
+ /*
401
+ * Group submit batch of async-copy to cover contiguous 1D array
402
+ * and commit that batch to eventually wait for completion.
403
+ * Object counts are in datum sized chunks, not bytes.
404
+ */
405
+ template <class TyGroup, class TyElem, typename DstLayout, typename SrcLayout>
406
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ dst, const DstLayout &dstLayout,
407
+ const TyElem *__restrict__ src, const SrcLayout &srcLayout) {
408
+ details::_memcpy_async_datum(group, dst, dstLayout, src, srcLayout);
409
+ __pipeline_commit();
410
+ }
411
+
412
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
413
+ template <unsigned int Stage, class TyGroup>
414
+ _CG_STATIC_QUALIFIER void wait_prior(const TyGroup &group) {
415
+ __pipeline_wait_prior(Stage);
416
+ group.sync();
417
+ }
418
+
419
+ /* Group wait all previously submitted memcpy_async to complete. */
420
+ template <class TyGroup>
421
+ _CG_STATIC_QUALIFIER void wait(const TyGroup &group) {
422
+ __pipeline_wait_prior(0);
423
+ group.sync();
424
+ }
425
+
426
+ /***************** CG APIs including pipeline are deprecated *****************/
427
+
428
+ /* Group submit batch of async-copy to cover of contiguous 1D array
429
+ to a pipeline and commit the batch*/
430
+ template <class TyGroup, class TyElem>
431
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void memcpy_async(TyGroup &group, TyElem *dst, size_t dstCount, const TyElem *src, size_t srcCount,
432
+ nvcuda::experimental::pipeline &pipe) {
433
+ details::_memcpy_async_datum(group, dst, dstCount, src, srcCount);
434
+ pipe.commit();
435
+ }
436
+
437
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
438
+ template <unsigned int Stage, class TyGroup>
439
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait_prior(TyGroup &group, nvcuda::experimental::pipeline &pipe) {
440
+ pipe.wait_prior<Stage>();
441
+ group.sync();
442
+ }
443
+
444
+ /* Group wait for stage-S of memcpy_async to complete. */
445
+ template <class TyGroup>
446
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait(TyGroup &group, nvcuda::experimental::pipeline &pipe, size_t stage) {
447
+ pipe.wait(stage);
448
+ group.sync();
449
+ }
450
+ _CG_END_NAMESPACE
451
+
452
+ #endif // _CG_ASYNC_H
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_REDUCE_H_
50
+ #define _CG_COALESCED_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "coalesced_scan.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto coalesced_reduce_to_one(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ if (group.size() == 32) {
65
+ auto out = val;
66
+ for (int offset = group.size() >> 1; offset > 0; offset >>= 1) {
67
+ out = op(out, group.shfl_up(out, offset));
68
+ }
69
+ return out;
70
+ }
71
+ else {
72
+ auto scan_result =
73
+ inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
74
+ return scan_result;
75
+ }
76
+ }
77
+
78
+ template <typename TyVal, typename TyOp>
79
+ _CG_QUALIFIER auto coalesced_reduce(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
80
+ auto out = coalesced_reduce_to_one(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
81
+ if (group.size() == 32) {
82
+ return group.shfl(out, 31);
83
+ }
84
+ else {
85
+ unsigned int group_mask = _coalesced_group_data_access::get_mask(group);
86
+ unsigned int last_thread_id = 31 - __clz(group_mask);
87
+ return details::tile::shuffle_dispatch<TyVal>::shfl(
88
+ _CG_STL_NAMESPACE::forward<TyVal>(out), group_mask, last_thread_id, 32);
89
+ }
90
+ }
91
+
92
+ template <typename TyVal, typename TyOp, unsigned int TySize, typename ParentT>
93
+ _CG_QUALIFIER auto coalesced_reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group,
94
+ TyVal&& val,
95
+ TyOp&& op) -> decltype(op(val, val)) {
96
+ auto out = val;
97
+ for (int mask = TySize >> 1; mask > 0; mask >>= 1) {
98
+ out = op(out, group.shfl_xor(out, mask));
99
+ }
100
+
101
+ return out;
102
+ }
103
+
104
+ } // details
105
+
106
+ _CG_END_NAMESPACE
107
+
108
+ #endif // _CG_COALESCED_REDUCE_H_
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_SCAN_H_
50
+ #define _CG_COALESCED_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "functional.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyGroup, typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ auto out = val;
65
+ for (int mask = 1; mask < group.size(); mask <<= 1) {
66
+ auto tmp = group.shfl_up(out, mask);
67
+ if (mask <= group.thread_rank()) {
68
+ out = op(out, tmp);
69
+ }
70
+ }
71
+
72
+ return out;
73
+ }
74
+
75
+ template <typename TyGroup, typename TyVal, typename TyOp>
76
+ _CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
77
+ const unsigned int groupSize = group.size();
78
+ auto out = val;
79
+
80
+ const unsigned int mask = details::_coalesced_group_data_access::get_mask(group);
81
+ unsigned int lanemask = details::lanemask32_lt() & mask;
82
+ unsigned int srcLane = details::laneid();
83
+
84
+ const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */
85
+ const unsigned int rank = __popc(lanemask);
86
+
87
+ for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) {
88
+ if (i <= rank) {
89
+ srcLane -= j;
90
+ j = i; /* maximum possible lane */
91
+
92
+ unsigned int begLane = base + rank - i; /* minimum possible lane */
93
+
94
+ /* Next source lane is in the range [ begLane .. srcLane ]
95
+ * If begLane < srcLane then do a binary search.
96
+ */
97
+ while (begLane < srcLane) {
98
+ const unsigned int halfLane = (begLane + srcLane) >> 1;
99
+ const unsigned int halfMask = lanemask >> halfLane;
100
+ const unsigned int d = __popc(halfMask);
101
+ if (d < i) {
102
+ srcLane = halfLane - 1; /* halfLane too large */
103
+ }
104
+ else if ((i < d) || !(halfMask & 0x01)) {
105
+ begLane = halfLane + 1; /* halfLane too small */
106
+ }
107
+ else {
108
+ begLane = srcLane = halfLane; /* happen to hit */
109
+ }
110
+ }
111
+ }
112
+
113
+ auto tmp = details::tile::shuffle_dispatch<TyVal>::shfl(out, mask, srcLane, 32);
114
+ if (i <= rank) {
115
+ out = op(out, tmp);
116
+ }
117
+ }
118
+ return out;
119
+ }
120
+
121
+ template <unsigned int TySize, typename ParentT, typename TyVal, typename TyOp>
122
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile<TySize, ParentT>& group,
123
+ TyVal&& val,
124
+ TyOp&& op) -> decltype(op(val, val)) {
125
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
126
+ }
127
+
128
+ template <typename TyVal, typename TyOp>
129
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
130
+ if (group.size() == 32) {
131
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
132
+ }
133
+ else {
134
+ return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
135
+ }
136
+ }
137
+
138
+ template <bool IntegralOptimized>
139
+ struct scan_choose_convertion;
140
+
141
+ template<>
142
+ struct scan_choose_convertion<true> {
143
+ template <typename TyGroup, typename TyRes, typename TyVal>
144
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
145
+ return result - val;
146
+ }
147
+ };
148
+
149
+ template<>
150
+ struct scan_choose_convertion<false> {
151
+ template <typename TyGroup, typename TyRes, typename TyVal>
152
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
153
+ auto ret = group.shfl_up(result, 1);
154
+ if (group.thread_rank() == 0) {
155
+ return {};
156
+ }
157
+ else {
158
+ return ret;
159
+ }
160
+ }
161
+ };
162
+
163
+ template <typename TyGroup, typename TyRes, typename TyVal, typename TyFn>
164
+ _CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
165
+ using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same<remove_qual<TyFn>, cooperative_groups::plus<remove_qual<TyVal>>>::value
166
+ && _CG_STL_NAMESPACE::is_integral<remove_qual<TyVal>>::value>;
167
+ return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward<TyVal>(val));
168
+ }
169
+
170
+ } // details
171
+
172
+ _CG_END_NAMESPACE
173
+
174
+ #endif // _CG_COALESCED_SCAN_H_
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_DRIVER_API_H
50
+ #define _CG_DRIVER_API_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details {
57
+ template <unsigned int RegId>
58
+ _CG_QUALIFIER unsigned int load_env_reg() {
59
+ // Abort by default
60
+ _CG_ABORT();
61
+ return 0;
62
+ }
63
+
64
+ template <unsigned int HiReg, unsigned int LoReg>
65
+ _CG_QUALIFIER unsigned long long load_env_reg64() {
66
+ unsigned long long registerLo = load_env_reg<LoReg>();
67
+ unsigned long long registerHi = load_env_reg<HiReg>();
68
+
69
+ return (registerHi << 32) | registerLo;
70
+ }
71
+
72
+ // inline PTX for accessing registers requires an immediate for the special reg
73
+ # define LOAD_ENVREG(NUMBER) \
74
+ template <> _CG_QUALIFIER unsigned int load_env_reg<NUMBER>() { \
75
+ unsigned int r; \
76
+ asm ("mov.u32 %0, %%envreg" #NUMBER ";" : "=r"(r)); \
77
+ return r; \
78
+ }
79
+
80
+ // Instantiate loaders for registers used
81
+ LOAD_ENVREG(0);
82
+ LOAD_ENVREG(1);
83
+ LOAD_ENVREG(2);
84
+ # undef LOAD_ENVREG
85
+
86
+ struct grid_workspace {
87
+ unsigned int wsSize;
88
+ unsigned int barrier;
89
+ };
90
+
91
+ _CG_QUALIFIER grid_workspace* get_grid_workspace() {
92
+ unsigned long long gridWsAbiAddress = load_env_reg64<1, 2>();
93
+ // Interpret the address from envreg 1 and 2 as the driver's grid workspace
94
+ return (reinterpret_cast<grid_workspace*>(gridWsAbiAddress));
95
+ }
96
+ }
97
+ _CG_END_NAMESPACE
98
+
99
+ #endif // _CG_DRIVER_API_H
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_FUNCTIONAL_H
50
+ #define _CG_FUNCTIONAL_H
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ #ifdef _CG_USE_CUDA_STL
57
+ # include <cuda/std/functional>
58
+ #endif
59
+
60
+ _CG_BEGIN_NAMESPACE
61
+
62
+ namespace details {
63
+ #ifdef _CG_USE_CUDA_STL
64
+ using cuda::std::plus;
65
+ using cuda::std::bit_and;
66
+ using cuda::std::bit_xor;
67
+ using cuda::std::bit_or;
68
+ #else
69
+ template <typename Ty> struct plus {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 + arg2;}};
70
+ template <typename Ty> struct bit_and {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 & arg2;}};
71
+ template <typename Ty> struct bit_xor {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 ^ arg2;}};
72
+ template <typename Ty> struct bit_or {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 | arg2;}};
73
+ #endif // _CG_USE_PLATFORM_STL
74
+ } // details
75
+
76
+ template <typename Ty>
77
+ struct plus : public details::plus<Ty> {};
78
+
79
+ template <typename Ty>
80
+ struct less {
81
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
82
+ return (arg2 < arg1) ? arg2 : arg1;
83
+ }
84
+ };
85
+
86
+ template <typename Ty>
87
+ struct greater {
88
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
89
+ return (arg1 < arg2) ? arg2 : arg1;
90
+ }
91
+ };
92
+
93
+ template <typename Ty>
94
+ struct bit_and : public details::bit_and<Ty> {};
95
+
96
+ template <typename Ty>
97
+ struct bit_xor : public details::bit_xor<Ty> {};
98
+
99
+ template <typename Ty>
100
+ struct bit_or : public details::bit_or<Ty> {};
101
+
102
+ #if defined(_CG_HAS_STL_ATOMICS)
103
+ namespace details {
104
+ template <class Ty>
105
+ using _atomic_is_type_supported = _CG_STL_NAMESPACE::integral_constant<bool,
106
+ _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) == 4 || sizeof(Ty) == 8)>;
107
+
108
+ template <typename TyOp> struct _atomic_op_supported : public _CG_STL_NAMESPACE::false_type {};
109
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::plus<Ty>> : public _atomic_is_type_supported<Ty> {};
110
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::less<Ty>> : public _atomic_is_type_supported<Ty> {};
111
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::greater<Ty>> : public _atomic_is_type_supported<Ty> {};
112
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_and<Ty>> : public _atomic_is_type_supported<Ty> {};
113
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_or<Ty>> : public _atomic_is_type_supported<Ty> {};
114
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_xor<Ty>> : public _atomic_is_type_supported<Ty> {};
115
+
116
+ template<typename TyAtomic, typename TyVal, typename TyOp>
117
+ _CG_QUALIFIER remove_qual<TyVal> atomic_cas_fallback(TyAtomic&& atomic, TyVal&& val, TyOp&& op) {
118
+ auto old = atomic.load(cuda::std::memory_order_relaxed);
119
+ while(!atomic.compare_exchange_weak(old, op(old, val), cuda::std::memory_order_relaxed));
120
+ return old;
121
+ }
122
+
123
+ template<typename TyOp>
124
+ struct op_picker;
125
+
126
+ template<typename TyVal>
127
+ struct op_picker<cooperative_groups::plus<TyVal>> {
128
+ template<typename TyAtomic>
129
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
130
+ return atomic.fetch_add(val, cuda::std::memory_order_relaxed);
131
+ }
132
+ };
133
+
134
+ template<typename TyVal>
135
+ struct op_picker<cooperative_groups::less<TyVal>> {
136
+ template<typename TyAtomic>
137
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
138
+ return atomic.fetch_min(val, cuda::std::memory_order_relaxed);
139
+ }
140
+ };
141
+
142
+ template<typename TyVal>
143
+ struct op_picker<cooperative_groups::greater<TyVal>> {
144
+ template<typename TyAtomic>
145
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
146
+ return atomic.fetch_max(val, cuda::std::memory_order_relaxed);
147
+ }
148
+ };
149
+
150
+ template<typename TyVal>
151
+ struct op_picker<cooperative_groups::bit_and<TyVal>> {
152
+ template<typename TyAtomic>
153
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
154
+ return atomic.fetch_and(val, cuda::std::memory_order_relaxed);
155
+ }
156
+ };
157
+
158
+ template<typename TyVal>
159
+ struct op_picker<cooperative_groups::bit_xor<TyVal>> {
160
+ template<typename TyAtomic>
161
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
162
+ return atomic.fetch_xor(val, cuda::std::memory_order_relaxed);
163
+ }
164
+ };
165
+
166
+ template<typename TyVal>
167
+ struct op_picker<cooperative_groups::bit_or<TyVal>> {
168
+ template<typename TyAtomic>
169
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
170
+ return atomic.fetch_or(val, cuda::std::memory_order_relaxed);
171
+ }
172
+ };
173
+
174
+ template<bool atomic_supported>
175
+ struct atomic_update_dispatch {};
176
+
177
+ template<>
178
+ struct atomic_update_dispatch<false> {
179
+ template<typename TyAtomic, typename TyVal, typename TyOp>
180
+ _CG_STATIC_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
181
+ return atomic_cas_fallback(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
182
+ }
183
+ };
184
+
185
+ template<>
186
+ struct atomic_update_dispatch<true> {
187
+ template<typename TyAtomic, typename TyVal, typename TyOp>
188
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val, TyOp&& op) {
189
+ using dispatch = op_picker<details::remove_qual<TyOp>>;
190
+
191
+ return dispatch::atomic_update(atomic, val);
192
+ }
193
+ };
194
+
195
+ template<typename TyAtomic, typename TyVal, typename TyOp>
196
+ _CG_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
197
+ using dispatch = atomic_update_dispatch<_atomic_op_supported<details::remove_qual<TyOp>>::value>;
198
+
199
+ return dispatch::atomic_update(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
200
+ }
201
+
202
+ template<typename TyAtomic, typename TyVal>
203
+ _CG_QUALIFIER void atomic_store(TyAtomic& atomic, TyVal&& val) {
204
+ atomic.store(val, cuda::std::memory_order_relaxed);
205
+ }
206
+ }
207
+ #endif
208
+
209
+ _CG_END_NAMESPACE
210
+
211
+ #endif
212
+ #endif //_CG_FUNCTIONAL_H
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h ADDED
@@ -0,0 +1,634 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_HELPERS_H_
50
+ # define _COOPERATIVE_GROUPS_HELPERS_H_
51
+
52
+ #include "info.h"
53
+ #include "sync.h"
54
+
55
+ _CG_BEGIN_NAMESPACE
56
+
57
+ namespace details {
58
+ #ifdef _CG_CPP11_FEATURES
59
+ template <typename Ty> struct _is_float_or_half : public _CG_STL_NAMESPACE::is_floating_point<Ty> {};
60
+ # ifdef _CG_HAS_FP16_COLLECTIVE
61
+ template <> struct _is_float_or_half<__half> : public _CG_STL_NAMESPACE::true_type {};
62
+ template <> struct _is_float_or_half<__half2> : public _CG_STL_NAMESPACE::true_type {};
63
+ # endif
64
+ template <typename Ty>
65
+ using is_float_or_half = _is_float_or_half<typename _CG_STL_NAMESPACE::remove_cv<Ty>::type>;
66
+
67
+ // Non-STL utility templates
68
+ template <typename Ty>
69
+ using remove_qual = typename _CG_STL_NAMESPACE::remove_cv<typename _CG_STL_NAMESPACE::remove_reference<Ty>::type>::type;
70
+
71
+ template <typename TyLhs, typename TyRhs>
72
+ using is_op_type_same = _CG_STL_NAMESPACE::is_same<remove_qual<TyLhs>, remove_qual<TyRhs>
73
+ >;
74
+ #endif
75
+
76
+ template <typename TyTrunc>
77
+ _CG_STATIC_QUALIFIER TyTrunc vec3_to_linear(dim3 index, dim3 nIndex) {
78
+ return ((TyTrunc)index.z * nIndex.y * nIndex.x) +
79
+ ((TyTrunc)index.y * nIndex.x) +
80
+ (TyTrunc)index.x;
81
+ }
82
+
83
+ namespace cta {
84
+
85
+ _CG_STATIC_QUALIFIER void sync()
86
+ {
87
+ __barrier_sync(0);
88
+ }
89
+
90
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
91
+ {
92
+ return static_cast<unsigned int>(blockDim.x * blockDim.y * blockDim.z);
93
+ }
94
+
95
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
96
+ {
97
+ return vec3_to_linear<unsigned int>(threadIdx, blockDim);
98
+ }
99
+
100
+ _CG_STATIC_QUALIFIER dim3 group_index()
101
+ {
102
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
103
+ }
104
+
105
+ _CG_STATIC_QUALIFIER dim3 thread_index()
106
+ {
107
+ return dim3(threadIdx.x, threadIdx.y, threadIdx.z);
108
+ }
109
+
110
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
111
+ {
112
+ return dim3(blockDim.x, blockDim.y, blockDim.z);
113
+ }
114
+
115
+ // Legacy aliases
116
+ _CG_STATIC_QUALIFIER unsigned int size()
117
+ {
118
+ return num_threads();
119
+ }
120
+
121
+ _CG_STATIC_QUALIFIER dim3 block_dim()
122
+ {
123
+ return dim_threads();
124
+ }
125
+
126
+ };
127
+
128
+ class _coalesced_group_data_access {
129
+ public:
130
+ // Retrieve mask of coalesced groups and tiles
131
+ template <typename TyGroup>
132
+ _CG_STATIC_QUALIFIER unsigned int get_mask(const TyGroup &group) {
133
+ return group.get_mask();
134
+ }
135
+
136
+ template <typename TyGroup>
137
+ _CG_STATIC_QUALIFIER TyGroup construct_from_mask(unsigned int mask) {
138
+ return TyGroup(mask);
139
+ }
140
+
141
+ template <typename TyGroup>
142
+ _CG_STATIC_QUALIFIER void modify_meta_group(TyGroup &group, unsigned int mgRank, unsigned int mgSize) {
143
+ group._data.coalesced.metaGroupRank = mgRank;
144
+ group._data.coalesced.metaGroupSize = mgSize;
145
+ }
146
+ };
147
+
148
+ namespace tile {
149
+ template <unsigned int TileCount, unsigned int TileMask, unsigned int LaneMask, unsigned int ShiftCount>
150
+ struct _tile_helpers{
151
+ _CG_STATIC_CONST_DECL unsigned int tileCount = TileCount;
152
+ _CG_STATIC_CONST_DECL unsigned int tileMask = TileMask;
153
+ _CG_STATIC_CONST_DECL unsigned int laneMask = LaneMask;
154
+ _CG_STATIC_CONST_DECL unsigned int shiftCount = ShiftCount;
155
+ };
156
+
157
+ template <unsigned int> struct tile_helpers;
158
+ template <> struct tile_helpers<32> : public _tile_helpers<1, 0xFFFFFFFF, 0x1F, 5> {};
159
+ template <> struct tile_helpers<16> : public _tile_helpers<2, 0x0000FFFF, 0x0F, 4> {};
160
+ template <> struct tile_helpers<8> : public _tile_helpers<4, 0x000000FF, 0x07, 3> {};
161
+ template <> struct tile_helpers<4> : public _tile_helpers<8, 0x0000000F, 0x03, 2> {};
162
+ template <> struct tile_helpers<2> : public _tile_helpers<16, 0x00000003, 0x01, 1> {};
163
+ template <> struct tile_helpers<1> : public _tile_helpers<32, 0x00000001, 0x00, 0> {};
164
+
165
+ #ifdef _CG_CPP11_FEATURES
166
+ namespace shfl {
167
+ /***********************************************************************************
168
+ * Recursively Sliced Shuffle
169
+ * Purpose:
170
+ * Slices an input type a number of times into integral types so that shuffles
171
+ * are well defined
172
+ * Expectations:
173
+ * This object *should not* be used from a reinterpret_cast pointer unless
174
+ * some alignment guarantees can be met. Use a memcpy to guarantee that loads
175
+ * from the integral types stored within are aligned and correct.
176
+ **********************************************************************************/
177
+ template <unsigned int count, bool intSized = (count <= sizeof(int))>
178
+ struct recursive_sliced_shuffle_helper;
179
+
180
+ template <unsigned int count>
181
+ struct recursive_sliced_shuffle_helper<count, true> {
182
+ int val;
183
+
184
+ template <typename TyFn>
185
+ _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
186
+ val = shfl(val);
187
+ }
188
+ };
189
+
190
+ template <unsigned int count>
191
+ struct recursive_sliced_shuffle_helper<count, false> {
192
+ int val;
193
+ recursive_sliced_shuffle_helper<count - sizeof(int)> next;
194
+
195
+ template <typename TyFn>
196
+ _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
197
+ val = shfl(val);
198
+ next.invoke_shuffle(shfl);
199
+ }
200
+ };
201
+ }
202
+
203
+ struct _memory_shuffle {
204
+ template <typename TyElem, typename TyShflFn>
205
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
206
+ static_assert(sizeof(TyElem) <= 32, "Cooperative groups collectives are limited to types smaller than 32B");
207
+ return TyElem{};
208
+ }
209
+
210
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
211
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
212
+ auto shfl = [=](int val) -> int {
213
+ return 0;
214
+ };
215
+
216
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
217
+ }
218
+
219
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
220
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
221
+ auto shfl = [=](int val) -> int {
222
+ return 0;
223
+ };
224
+
225
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
226
+ }
227
+
228
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
229
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
230
+ auto shfl = [=](int val) -> int {
231
+ return 0;
232
+ };
233
+
234
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
235
+ }
236
+
237
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
238
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
239
+ auto shfl = [=](int val) -> int {
240
+ return 0;
241
+ };
242
+
243
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
244
+ }
245
+ };
246
+
247
+ /***********************************************************************************
248
+ * Intrinsic Device Function Shuffle
249
+ * Purpose:
250
+ * Uses a shuffle helper that has characteristics best suited for moving
251
+ * elements between threads
252
+ * Expectations:
253
+ * Object given will be forced into an l-value type so that it can be used
254
+ * with a helper structure that reinterprets the data into intrinsic compatible
255
+ * types
256
+ * Notes:
257
+ * !! TyRet is required so that objects are returned by value and not as
258
+ * dangling references depending on the value category of the passed object
259
+ **********************************************************************************/
260
+ struct _intrinsic_compat_shuffle {
261
+ template <unsigned int count>
262
+ using shfl_helper = shfl::recursive_sliced_shuffle_helper<count>;
263
+
264
+ template <typename TyElem, typename TyShflFn>
265
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
266
+ static_assert(__is_trivially_copyable(TyElem), "Type is not compatible with device shuffle");
267
+ shfl_helper<sizeof(TyElem)> helper;
268
+ memcpy(&helper, &elem, sizeof(TyElem));
269
+ helper.invoke_shuffle(fn);
270
+ memcpy(&elem, &helper, sizeof(TyElem));
271
+ return elem;
272
+ }
273
+
274
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
275
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
276
+ auto shfl = [=](int val) -> int {
277
+ return __shfl_sync(gMask, val, srcRank, threads);
278
+ };
279
+
280
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
281
+ }
282
+
283
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
284
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
285
+ auto shfl = [=](int val) -> int {
286
+ return __shfl_down_sync(gMask, val, delta, threads);
287
+ };
288
+
289
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
290
+ }
291
+
292
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
293
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
294
+ auto shfl = [=](int val) -> int {
295
+ return __shfl_up_sync(gMask, val, delta, threads);
296
+ };
297
+
298
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
299
+ }
300
+
301
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
302
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
303
+ auto shfl = [=](int val) -> int {
304
+ return __shfl_xor_sync(gMask, val, lMask, threads);
305
+ };
306
+
307
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
308
+ }
309
+ };
310
+
311
+ struct _native_shuffle {
312
+ template <typename TyElem>
313
+ _CG_STATIC_QUALIFIER TyElem shfl(
314
+ TyElem elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
315
+ return static_cast<TyElem>(__shfl_sync(gMask, elem, srcRank, threads));
316
+ }
317
+
318
+ template <typename TyElem>
319
+ _CG_STATIC_QUALIFIER TyElem shfl_down(
320
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
321
+ return static_cast<TyElem>(__shfl_down_sync(gMask, elem, delta, threads));
322
+ }
323
+
324
+ template <typename TyElem>
325
+ _CG_STATIC_QUALIFIER TyElem shfl_up(
326
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
327
+ return static_cast<TyElem>(__shfl_up_sync(gMask, elem, delta, threads));
328
+ }
329
+
330
+ template <typename TyElem>
331
+ _CG_STATIC_QUALIFIER TyElem shfl_xor(
332
+ TyElem elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
333
+ return static_cast<TyElem>(__shfl_xor_sync(gMask, elem, lMask, threads));
334
+ }
335
+ };
336
+
337
+ // Almost all arithmetic types are supported by native shuffle
338
+ // Vector types are the exception
339
+ template <typename TyElem>
340
+ using use_native_shuffle = _CG_STL_NAMESPACE::integral_constant<
341
+ bool,
342
+ _CG_STL_NAMESPACE::is_integral<
343
+ remove_qual<TyElem>>::value ||
344
+ details::is_float_or_half<
345
+ remove_qual<TyElem>>::value
346
+ >;
347
+
348
+ constexpr unsigned long long _MemoryShuffleCutoff = 32;
349
+
350
+ template <typename TyElem,
351
+ bool IsNative = use_native_shuffle<TyElem>::value,
352
+ bool InMem = (sizeof(TyElem) > _MemoryShuffleCutoff)>
353
+ struct shuffle_dispatch;
354
+
355
+ template <typename TyElem>
356
+ struct shuffle_dispatch<TyElem, true, false> : public _native_shuffle {};
357
+
358
+ template <typename TyElem>
359
+ struct shuffle_dispatch<TyElem, false, false> : public _intrinsic_compat_shuffle {};
360
+
361
+ template <typename TyElem>
362
+ struct shuffle_dispatch<TyElem, false, true> : public _memory_shuffle {};
363
+
364
+ #endif //_CG_CPP11_FEATURES
365
+ };
366
+
367
+ namespace multi_grid {
368
+ struct multi_grid_functions;
369
+ };
370
+
371
+ namespace grid {
372
+ _CG_STATIC_QUALIFIER void sync(unsigned int *bar) {
373
+ unsigned int expected = gridDim.x * gridDim.y * gridDim.z;
374
+
375
+ details::sync_grids(expected, bar);
376
+ }
377
+
378
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks()
379
+ {
380
+ // grid.y * grid.z -> [max(65535) * max(65535)] fits within 4b, promote after multiplication
381
+ // grid.x * (grid.y * grid.z) -> [max(2^31-1) * max(65535 * 65535)] exceeds 4b, promote before multiplication
382
+ return (unsigned long long)gridDim.x * (gridDim.y * gridDim.z);
383
+ }
384
+
385
+ _CG_STATIC_QUALIFIER unsigned long long num_threads()
386
+ {
387
+ return num_blocks() * cta::num_threads();
388
+ }
389
+
390
+ _CG_STATIC_QUALIFIER unsigned long long block_rank()
391
+ {
392
+ return vec3_to_linear<unsigned long long>(blockIdx, gridDim);
393
+ }
394
+
395
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank()
396
+ {
397
+ return block_rank() * cta::num_threads() + cta::thread_rank();
398
+ }
399
+
400
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
401
+ {
402
+ return dim3(gridDim.x, gridDim.y, gridDim.z);
403
+ }
404
+
405
+ _CG_STATIC_QUALIFIER dim3 block_index()
406
+ {
407
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
408
+ }
409
+
410
+ #if defined(_CG_HAS_CLUSTER_GROUP)
411
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
412
+ return __clusterGridDimInClusters();
413
+ }
414
+
415
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
416
+ const dim3 dimClusters = dim_clusters();
417
+ return dimClusters.x * dimClusters.y * dimClusters.z;
418
+ }
419
+
420
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
421
+ return __clusterIdx();
422
+ }
423
+
424
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
425
+ return vec3_to_linear<unsigned long long>(cluster_index(), dim_clusters());
426
+ }
427
+ #endif
428
+
429
+ // Legacy aliases
430
+ _CG_STATIC_QUALIFIER unsigned long long size()
431
+ {
432
+ return num_threads();
433
+ }
434
+
435
+ _CG_STATIC_QUALIFIER dim3 grid_dim()
436
+ {
437
+ return dim_blocks();
438
+ }
439
+ };
440
+
441
+
442
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
443
+
444
+ namespace multi_grid {
445
+ _CG_STATIC_QUALIFIER unsigned long long get_intrinsic_handle()
446
+ {
447
+ return (cudaCGGetIntrinsicHandle(cudaCGScopeMultiGrid));
448
+ }
449
+
450
+ _CG_STATIC_QUALIFIER void sync(const unsigned long long handle)
451
+ {
452
+ cudaError_t err = cudaCGSynchronize(handle, 0);
453
+ }
454
+
455
+ _CG_STATIC_QUALIFIER unsigned int size(const unsigned long long handle)
456
+ {
457
+ unsigned int numThreads = 0;
458
+ cudaCGGetSize(&numThreads, NULL, handle);
459
+ return numThreads;
460
+ }
461
+
462
+ _CG_STATIC_QUALIFIER unsigned int thread_rank(const unsigned long long handle)
463
+ {
464
+ unsigned int threadRank = 0;
465
+ cudaCGGetRank(&threadRank, NULL, handle);
466
+ return threadRank;
467
+ }
468
+
469
+ _CG_STATIC_QUALIFIER unsigned int grid_rank(const unsigned long long handle)
470
+ {
471
+ unsigned int gridRank = 0;
472
+ cudaCGGetRank(NULL, &gridRank, handle);
473
+ return gridRank;
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER unsigned int num_grids(const unsigned long long handle)
477
+ {
478
+ unsigned int numGrids = 0;
479
+ cudaCGGetSize(NULL, &numGrids, handle);
480
+ return numGrids;
481
+ }
482
+
483
+ # ifdef _CG_CPP11_FEATURES
484
+ struct multi_grid_functions {
485
+ decltype(multi_grid::get_intrinsic_handle) *get_intrinsic_handle;
486
+ decltype(multi_grid::sync) *sync;
487
+ decltype(multi_grid::size) *size;
488
+ decltype(multi_grid::thread_rank) *thread_rank;
489
+ decltype(multi_grid::grid_rank) *grid_rank;
490
+ decltype(multi_grid::num_grids) *num_grids;
491
+ };
492
+
493
+ template <typename = void>
494
+ _CG_STATIC_QUALIFIER const multi_grid_functions* load_grid_intrinsics() {
495
+ __constant__ static const multi_grid_functions mgf {
496
+ &multi_grid::get_intrinsic_handle,
497
+ &multi_grid::sync,
498
+ &multi_grid::size,
499
+ &multi_grid::thread_rank,
500
+ &multi_grid::grid_rank,
501
+ &multi_grid::num_grids
502
+ };
503
+
504
+ return &mgf;
505
+ }
506
+ # endif
507
+ };
508
+ #endif
509
+
510
+ #if defined(_CG_HAS_CLUSTER_GROUP)
511
+ namespace cluster {
512
+
513
+ _CG_STATIC_QUALIFIER bool isReal()
514
+ {
515
+ return __clusterDimIsSpecified();
516
+ }
517
+
518
+ _CG_STATIC_QUALIFIER void barrier_arrive()
519
+ {
520
+ __cluster_barrier_arrive();
521
+ }
522
+
523
+ _CG_STATIC_QUALIFIER void barrier_wait()
524
+ {
525
+ __cluster_barrier_wait();
526
+ }
527
+
528
+ _CG_STATIC_QUALIFIER void sync()
529
+ {
530
+ barrier_arrive();
531
+ barrier_wait();
532
+ }
533
+
534
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
535
+ {
536
+ return __cluster_query_shared_rank(addr);
537
+ }
538
+
539
+ template <typename T>
540
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
541
+ {
542
+ return static_cast<T*>(__cluster_map_shared_rank(addr, rank));
543
+ }
544
+
545
+ _CG_STATIC_QUALIFIER dim3 block_index()
546
+ {
547
+ return __clusterRelativeBlockIdx();
548
+ }
549
+
550
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
551
+ {
552
+ return __clusterRelativeBlockRank();
553
+ }
554
+
555
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
556
+ {
557
+ return block_rank() * cta::num_threads() + cta::thread_rank();
558
+ }
559
+
560
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
561
+ {
562
+ return __clusterDim();
563
+ }
564
+
565
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
566
+ {
567
+ return __clusterSizeInBlocks();
568
+ }
569
+
570
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
571
+ {
572
+ const dim3 dimBlocks = dim_blocks();
573
+ const unsigned int x = dimBlocks.x * blockDim.x;
574
+ const unsigned int y = dimBlocks.y * blockDim.y;
575
+ const unsigned int z = dimBlocks.z * blockDim.z;
576
+ return dim3(x, y, z);
577
+ }
578
+
579
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
580
+ {
581
+ return num_blocks() * cta::num_threads();
582
+ }
583
+
584
+ };
585
+ #endif
586
+
587
+ _CG_STATIC_QUALIFIER unsigned int laneid()
588
+ {
589
+ unsigned int laneid;
590
+ asm ("mov.u32 %0, %%laneid;" : "=r"(laneid));
591
+ return laneid;
592
+ }
593
+
594
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_eq()
595
+ {
596
+ unsigned int lanemask32_eq;
597
+ asm ("mov.u32 %0, %%lanemask_eq;" : "=r"(lanemask32_eq));
598
+ return (lanemask32_eq);
599
+ }
600
+
601
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_lt()
602
+ {
603
+ unsigned int lanemask32_lt;
604
+ asm ("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask32_lt));
605
+ return (lanemask32_lt);
606
+ }
607
+
608
+ _CG_STATIC_QUALIFIER void abort()
609
+ {
610
+ _CG_ABORT();
611
+ }
612
+
613
+ template <typename Ty>
614
+ _CG_QUALIFIER void assert_if_not_arithmetic() {
615
+ #ifdef _CG_CPP11_FEATURES
616
+ static_assert(
617
+ _CG_STL_NAMESPACE::is_integral<Ty>::value ||
618
+ details::is_float_or_half<Ty>::value,
619
+ "Error: Ty is neither integer or float"
620
+ );
621
+ #endif //_CG_CPP11_FEATURES
622
+ }
623
+
624
+ #ifdef _CG_CPP11_FEATURES
625
+ _CG_STATIC_QUALIFIER constexpr unsigned int log2(unsigned int x) {
626
+ return x == 1 ? 0 : 1 + log2(x / 2);
627
+ }
628
+ #endif //_CG_CPP11_FEATURES
629
+
630
+ }; // !Namespace internal
631
+
632
+ _CG_END_NAMESPACE
633
+
634
+ #endif /* !_COOPERATIVE_GROUPS_HELPERS_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/info.h ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+
50
+
51
+ #ifndef _CG_INFO_H_
52
+ #define _CG_INFO_H_
53
+ /*
54
+ ** Define: _CG_VERSION
55
+ */
56
+ #define _CG_VERSION 1000
57
+
58
+ /*
59
+ ** Define: _CG_ABI_VERSION
60
+ */
61
+ #ifndef _CG_ABI_VERSION
62
+ # define _CG_ABI_VERSION 1
63
+ #endif
64
+
65
+ /*
66
+ ** Define: _CG_ABI_EXPERIMENTAL
67
+ ** Desc: If enabled, sets all features enabled (ABI-breaking or experimental)
68
+ */
69
+ #if defined(_CG_ABI_EXPERIMENTAL)
70
+ #endif
71
+
72
+ #define _CG_CONCAT_INNER(x, y) x ## y
73
+ #define _CG_CONCAT_OUTER(x, y) _CG_CONCAT_INNER(x, y)
74
+ #define _CG_NAMESPACE _CG_CONCAT_OUTER(__v, _CG_ABI_VERSION)
75
+
76
+ #define _CG_BEGIN_NAMESPACE \
77
+ namespace cooperative_groups { namespace _CG_NAMESPACE {
78
+ #define _CG_END_NAMESPACE \
79
+ }; using namespace _CG_NAMESPACE; };
80
+
81
+ #if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSC_VER) && (_MSC_VER >= 1900))
82
+ # define _CG_CPP11_FEATURES
83
+ #endif
84
+
85
+ #if !defined(_CG_QUALIFIER)
86
+ # define _CG_QUALIFIER __forceinline__ __device__
87
+ #endif
88
+ #if !defined(_CG_STATIC_QUALIFIER)
89
+ # define _CG_STATIC_QUALIFIER static __forceinline__ __device__
90
+ #endif
91
+ #if !defined(_CG_CONSTEXPR_QUALIFIER)
92
+ # if defined(_CG_CPP11_FEATURES)
93
+ # define _CG_CONSTEXPR_QUALIFIER constexpr __forceinline__ __device__
94
+ # else
95
+ # define _CG_CONSTEXPR_QUALIFIER _CG_QUALIFIER
96
+ # endif
97
+ #endif
98
+ #if !defined(_CG_STATIC_CONSTEXPR_QUALIFIER)
99
+ # if defined(_CG_CPP11_FEATURES)
100
+ # define _CG_STATIC_CONSTEXPR_QUALIFIER static constexpr __forceinline__ __device__
101
+ # else
102
+ # define _CG_STATIC_CONSTEXPR_QUALIFIER _CG_STATIC_QUALIFIER
103
+ # endif
104
+ #endif
105
+
106
+ #if defined(_MSC_VER)
107
+ # define _CG_DEPRECATED __declspec(deprecated)
108
+ #else
109
+ # define _CG_DEPRECATED __attribute__((deprecated))
110
+ #endif
111
+
112
+ #if (__CUDA_ARCH__ >= 600) || !defined(__CUDA_ARCH__)
113
+ # define _CG_HAS_GRID_GROUP
114
+ #endif
115
+ #if (__CUDA_ARCH__ >= 600) || !defined(__CUDA_ARCH__)
116
+ # define _CG_HAS_MULTI_GRID_GROUP
117
+ #endif
118
+ #if (__CUDA_ARCH__ >= 700) || !defined(__CUDA_ARCH__)
119
+ # define _CG_HAS_MATCH_COLLECTIVE
120
+ #endif
121
+
122
+ #if (__CUDA_ARCH__ >= 800) || !defined(__CUDA_ARCH__) && (defined(__NVCC__) || defined(__CUDACC_RTC__))
123
+ # define _CG_HAS_OP_REDUX
124
+ #endif
125
+
126
+ #if ((__CUDA_ARCH__ >= 800) || !defined(__CUDA_ARCH__)) && !defined(_CG_USER_PROVIDED_SHARED_MEMORY)
127
+ # define _CG_HAS_RESERVED_SHARED
128
+ #endif
129
+
130
+ #if ((__CUDA_ARCH__ >= 900) || !defined(__CUDA_ARCH__)) && \
131
+ (defined(__NVCC__) || defined(__CUDACC_RTC__) || defined(_CG_CLUSTER_INTRINSICS_AVAILABLE)) && \
132
+ defined(_CG_CPP11_FEATURES)
133
+ # define _CG_HAS_CLUSTER_GROUP
134
+ #endif
135
+
136
+ #if (__CUDA_ARCH__ >= 900) || !defined(__CUDA_ARCH__)
137
+ # define _CG_HAS_INSTR_ELECT
138
+ #endif
139
+
140
+ // Has __half and __half2
141
+ // Only usable if you include the cuda_fp16.h extension, and
142
+ // _before_ including cooperative_groups.h
143
+ #ifdef __CUDA_FP16_TYPES_EXIST__
144
+ # define _CG_HAS_FP16_COLLECTIVE
145
+ #endif
146
+
147
+ // Include libcu++ where supported.
148
+ #if defined(_CG_CPP11_FEATURES) && !defined(__QNX__) && !defined(__ibmxl__) && \
149
+ (defined(__NVCC__) || defined(__CUDACC_RTC__)) && \
150
+ (defined(__x86_64__) || defined(__aarch64__) || defined(__ppc64__)|| defined(_M_X64) || defined(_M_ARM64)) && \
151
+ (defined(_MSC_VER) || defined(__GNUC__) || defined(__clang__))
152
+ # define _CG_USE_CUDA_STL
153
+ #else
154
+ # define _CG_USE_OWN_TRAITS
155
+ #endif
156
+
157
+ #if defined(_CG_USE_CUDA_STL) && (!defined(__CUDA_ARCH__) || \
158
+ ((!defined(_MSC_VER) && __CUDA_ARCH__ >= 600) || (defined(_MSC_VER) && __CUDA_ARCH__ >= 700)))
159
+ # define _CG_HAS_STL_ATOMICS
160
+ #endif
161
+
162
+ #ifdef _CG_CPP11_FEATURES
163
+ // Use cuda::std:: for type_traits
164
+ # if defined(_CG_USE_CUDA_STL)
165
+ # define _CG_STL_NAMESPACE cuda::std
166
+ # include <cuda/std/type_traits>
167
+ // Use CG's implementation of type traits
168
+ # else
169
+ # define _CG_STL_NAMESPACE cooperative_groups::details::templates
170
+ # endif
171
+ #endif
172
+
173
+ #ifdef _CG_CPP11_FEATURES
174
+ # define _CG_STATIC_CONST_DECL static constexpr
175
+ # define _CG_CONST_DECL constexpr
176
+ #else
177
+ # define _CG_STATIC_CONST_DECL static const
178
+ # define _CG_CONST_DECL const
179
+ #endif
180
+
181
+ #if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__)
182
+ # define _CG_ASM_PTR_CONSTRAINT "r"
183
+ #else
184
+ # define _CG_ASM_PTR_CONSTRAINT "l"
185
+ #endif
186
+
187
+ /*
188
+ ** Define: CG_DEBUG
189
+ ** What: Enables various runtime safety checks
190
+ */
191
+ #if defined(__CUDACC_DEBUG__) && defined(CG_DEBUG) && !defined(NDEBUG)
192
+ # define _CG_DEBUG
193
+ #endif
194
+
195
+ #if defined(_CG_DEBUG)
196
+ # include <assert.h>
197
+ # define _CG_ASSERT(x) assert((x));
198
+ # define _CG_ABORT() assert(0);
199
+ #else
200
+ # define _CG_ASSERT(x)
201
+ # define _CG_ABORT() __trap();
202
+ #endif
203
+
204
+ _CG_BEGIN_NAMESPACE
205
+
206
+ namespace details {
207
+ _CG_STATIC_CONST_DECL unsigned int default_max_block_size = 1024;
208
+
209
+ #if defined(_CG_CPP11_FEATURES) && !defined(_CG_USE_CUDA_STL)
210
+ namespace templates {
211
+
212
+ /**
213
+ * Integral constants
214
+ **/
215
+ template <typename Ty, Ty Val>
216
+ struct integral_constant {
217
+ static constexpr Ty value = Val;
218
+ typedef Ty type;
219
+
220
+ _CG_QUALIFIER constexpr operator type() const noexcept { return value; }
221
+ _CG_QUALIFIER constexpr type operator()() const noexcept { return value; }
222
+ };
223
+
224
+ typedef integral_constant<bool, true> true_type;
225
+ typedef integral_constant<bool, false> false_type;
226
+
227
+ /**
228
+ * CV Qualifiers
229
+ **/
230
+ template <class Ty> struct is_lvalue_reference : public details::templates::false_type {};
231
+ template <class Ty> struct is_lvalue_reference<Ty&> : public details::templates::true_type {};
232
+
233
+ template <class Ty> struct remove_reference {typedef Ty type;};
234
+ template <class Ty> struct remove_reference<Ty&> {typedef Ty type;};
235
+ template <class Ty> struct remove_reference<Ty&&> {typedef Ty type;};
236
+
237
+ template <class Ty>
238
+ using remove_reference_t = typename details::templates::remove_reference<Ty>::type;
239
+
240
+ template <class Ty> struct remove_const {typedef Ty type;};
241
+ template <class Ty> struct remove_const<const Ty> {typedef Ty type;};
242
+
243
+ template <class Ty> struct remove_volatile {typedef Ty type;};
244
+ template <class Ty> struct remove_volatile<volatile Ty> {typedef Ty type;};
245
+
246
+ template <class Ty> struct remove_cv {typedef typename details::templates::remove_volatile<typename details::templates::remove_const<Ty>::type>::type type;};
247
+
248
+ template <class Ty>
249
+ using remove_cv_t = typename details::templates::remove_cv<Ty>::type;
250
+
251
+ template <class Ty>
252
+ _CG_QUALIFIER Ty&& forward(remove_reference_t<Ty> &t) noexcept {
253
+ return static_cast<Ty&&>(t);
254
+ }
255
+
256
+ template <class Ty>
257
+ _CG_QUALIFIER Ty&& forward(remove_reference_t<Ty> &&t) noexcept {
258
+ static_assert(!details::templates::is_lvalue_reference<Ty>::value, "Forwarding an rvalue as an lvalue is not allowed.");
259
+ return static_cast<Ty&&>(t);
260
+ }
261
+
262
+ /**
263
+ * is_integral
264
+ **/
265
+ template <class Ty> struct _is_integral : public details::templates::false_type {};
266
+ template <> struct _is_integral<bool> : public details::templates::true_type {};
267
+ template <> struct _is_integral<char> : public details::templates::true_type {};
268
+ template <> struct _is_integral<unsigned char> : public details::templates::true_type {};
269
+ template <> struct _is_integral<short> : public details::templates::true_type {};
270
+ template <> struct _is_integral<unsigned short> : public details::templates::true_type {};
271
+ template <> struct _is_integral<int> : public details::templates::true_type {};
272
+ template <> struct _is_integral<unsigned int> : public details::templates::true_type {};
273
+ template <> struct _is_integral<long> : public details::templates::true_type {};
274
+ template <> struct _is_integral<long long> : public details::templates::true_type {};
275
+ template <> struct _is_integral<unsigned long> : public details::templates::true_type {};
276
+ template <> struct _is_integral<unsigned long long> : public details::templates::true_type {};
277
+ //Vector type support?
278
+
279
+ template <typename Ty>
280
+ struct is_integral : public details::templates::_is_integral<typename details::templates::remove_cv<Ty>::type> {};
281
+
282
+ /**
283
+ * is_floating_point
284
+ **/
285
+ template <class Ty> struct _is_floating_point : public details::templates::false_type {};
286
+ template <> struct _is_floating_point<float> : public details::templates::true_type {};
287
+ template <> struct _is_floating_point<double> : public details::templates::true_type {};
288
+ template <> struct _is_floating_point<long double> : public details::templates::true_type {};
289
+ # ifdef __CUDA_FP16_TYPES_EXIST__
290
+ template <> struct _is_floating_point<__half> : public details::templates::true_type {};
291
+ template <> struct _is_floating_point<__half2> : public details::templates::true_type {};
292
+ # endif
293
+ //Vector type support?
294
+
295
+ template <typename Ty>
296
+ struct is_floating_point : public details::templates::_is_floating_point<typename details::templates::remove_cv<Ty>::type> {};
297
+
298
+ template <class T>
299
+ struct is_arithmetic : details::templates::integral_constant<
300
+ bool,
301
+ details::templates::is_integral<T>::value ||
302
+ details::templates::is_floating_point<T>::value> {};
303
+
304
+ template <typename Ty, bool = details::templates::is_arithmetic<Ty>::value>
305
+ struct _is_unsigned : details::templates::integral_constant<bool, Ty(0) < Ty(-1)> {};
306
+
307
+ template <typename Ty>
308
+ struct _is_unsigned<Ty,false> : details::templates::false_type {};
309
+
310
+ template <typename Ty>
311
+ struct is_unsigned : _is_unsigned<typename details::templates::remove_cv<Ty>::type> {};
312
+
313
+ /**
314
+ * programmatic type traits
315
+ **/
316
+ template<bool B, class Ty = void>
317
+ struct enable_if {};
318
+
319
+ template<class Ty>
320
+ struct enable_if<true, Ty> { typedef Ty type; };
321
+
322
+ template<bool Cond, typename Ty = void>
323
+ using enable_if_t = typename details::templates::enable_if<Cond, Ty>::type;
324
+
325
+ template<class Ty1, class Ty2>
326
+ struct is_same : details::templates::false_type {};
327
+
328
+ template<class Ty>
329
+ struct is_same<Ty, Ty> : details::templates::true_type {};
330
+
331
+ } // templates
332
+ #endif // _CG_CPP11_FEATURES
333
+
334
+ } // details
335
+ _CG_END_NAMESPACE
336
+
337
+
338
+ #endif // _CG_INFO_H_
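Note (editorial, not part of the upstream diff): `_CG_BEGIN_NAMESPACE`/`_CG_END_NAMESPACE` above place every symbol in a versioned inner namespace (`cooperative_groups::__v1` with the default ABI version) and re-export it with a using-directive. A minimal sketch of the same pattern, using hypothetical `mylib`/`MYLIB_*` names, is:

```cuda
// The same ABI-versioning trick as _CG_NAMESPACE / _CG_BEGIN_NAMESPACE:
// definitions live in an inner versioned namespace and are re-exported
// into the outer one with a using-directive. All names here are hypothetical.
#define MYLIB_ABI_VERSION 2
#define MYLIB_CONCAT_INNER(x, y) x##y
#define MYLIB_CONCAT_OUTER(x, y) MYLIB_CONCAT_INNER(x, y)
#define MYLIB_NAMESPACE MYLIB_CONCAT_OUTER(__v, MYLIB_ABI_VERSION)

#define MYLIB_BEGIN_NAMESPACE namespace mylib { namespace MYLIB_NAMESPACE {
#define MYLIB_END_NAMESPACE   } using namespace MYLIB_NAMESPACE; }

MYLIB_BEGIN_NAMESPACE
__host__ __device__ inline int answer() { return 42; }
MYLIB_END_NAMESPACE

int main() {
    // Both spellings name the same function: the versioned one explicitly,
    // the unversioned one through the using-directive.
    return (mylib::answer() == mylib::__v2::answer()) ? 0 : 1;
}
```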
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h ADDED
@@ -0,0 +1,189 @@
1
+ /*
2
+ * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_INVOKE_H
51
+ #define _CG_INVOKE_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename Group>
63
+ struct _elect_group_supported : _CG_STL_NAMESPACE::false_type {};
64
+ #ifdef _CG_HAS_INSTR_ELECT
65
+ template<>
66
+ struct _elect_group_supported<coalesced_group> : _CG_STL_NAMESPACE::true_type {};
67
+ template<unsigned int Size, typename Parent>
68
+ struct _elect_group_supported<thread_block_tile<Size, Parent>> :
69
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size <= 32)> {};
70
+ #endif
71
+
72
+ template <typename Group>
73
+ struct elect_group_supported : public _elect_group_supported<details::remove_qual<Group>> {};
74
+
75
+ template<typename Group>
76
+ _CG_STATIC_QUALIFIER bool elect_one(const Group& group, unsigned int mask, unsigned int& leader_lane) {
77
+ int is_leader = 0;
78
+ #ifdef _CG_HAS_INSTR_ELECT
79
+ asm("{\n\t"
80
+ " .reg .pred p;\n\t"
81
+ " elect.sync %0|p, %2;\n\t"
82
+ " @p mov.s32 %1, 1;\n\t"
83
+ "}"
84
+ : "+r"(leader_lane), "+r"(is_leader) : "r" (mask));
85
+ #endif
86
+ return is_leader;
87
+ }
88
+
89
+ template<bool UseElect>
90
+ struct invoke_one_impl {};
91
+
92
+ template<>
93
+ struct invoke_one_impl<true> {
94
+ template<typename Group, typename Fn, typename... Args>
95
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
96
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
97
+ unsigned int leader_lane = 0;
98
+
99
+ if (elect_one(group, mask, leader_lane)) {
100
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
101
+ }
102
+ }
103
+
104
+ template<typename Group, typename Fn, typename... Args>
105
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
106
+ -> typename _CG_STL_NAMESPACE::remove_reference<
107
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
108
+
109
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
110
+ details::remove_qual<ResultType> result;
111
+ auto mask = details::_coalesced_group_data_access::get_mask(group);
112
+ unsigned int leader_lane = 0;
113
+
114
+ if (elect_one(group, mask, leader_lane)) {
115
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
116
+ }
117
+
118
+ // Need to use low level api instead of group.shfl, because elect_one returns lane id, not group rank.
119
+ return tile::shuffle_dispatch<ResultType>::shfl(result, mask, leader_lane, 32);
120
+ }
121
+ };
122
+
123
+ template<>
124
+ struct invoke_one_impl<false> {
125
+ template<typename Group, typename Fn, typename... Args>
126
+ _CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
127
+ if (group.thread_rank() == 0) {
128
+ _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
129
+ }
130
+ }
131
+
132
+ template<typename Group, typename Fn, typename... Args>
133
+ _CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
134
+ -> typename _CG_STL_NAMESPACE::remove_reference<
135
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
136
+
137
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
138
+ details::remove_qual<ResultType> result;
139
+
140
+ if (group.thread_rank() == 0) {
141
+ result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
142
+ }
143
+
144
+ return group.shfl(result, 0);
145
+ }
146
+ };
147
+
148
+
149
+ }; // namespace details
150
+
151
+ template<typename Group, typename Fn, typename... Args>
152
+ _CG_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
153
+ using impl = details::invoke_one_impl<details::elect_group_supported<Group>::value>;
154
+ impl::invoke_one(group, _CG_STL_NAMESPACE::forward<Fn>(fn), _CG_STL_NAMESPACE::forward<Args>(args)...);
155
+ }
156
+
157
+ template<typename Fn, typename... Args>
158
+ _CG_QUALIFIER auto invoke_one_broadcast(const coalesced_group& group, Fn&& fn, Args&&... args)
159
+ -> typename _CG_STL_NAMESPACE::remove_reference<
160
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
161
+
162
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
163
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
164
+ "For invocables returning void invoke_one should be used instead");
165
+ using impl = details::invoke_one_impl<details::elect_group_supported<coalesced_group>::value>;
166
+ return impl::invoke_one_broadcast(group,
167
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
168
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
169
+ }
170
+
171
+ template<unsigned int Size, typename Parent, typename Fn, typename... Args>
172
+ _CG_QUALIFIER auto invoke_one_broadcast(const thread_block_tile<Size, Parent>& group, Fn&& fn, Args&&... args)
173
+ -> typename _CG_STL_NAMESPACE::remove_reference<
174
+ decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
175
+
176
+ using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
177
+ static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
178
+ "For invocables returning void invoke_one should be used instead");
179
+ using impl = details::invoke_one_impl<details::elect_group_supported<thread_block_tile<Size, Parent>>::value>;
180
+ return impl::invoke_one_broadcast(group,
181
+ _CG_STL_NAMESPACE::forward<Fn>(fn),
182
+ _CG_STL_NAMESPACE::forward<Args>(args)...);
183
+ }
184
+
185
+ _CG_END_NAMESPACE
186
+
187
+ #endif //_CG_CPP11_FEATURES
188
+
189
+ #endif // _CG_INVOKE_H
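Note (editorial, not part of the upstream diff): the dispatch above backs the public `invoke_one`/`invoke_one_broadcast` entry points, electing a leader with `elect.sync` where the instruction exists and falling back to `thread_rank() == 0` otherwise. A minimal usage sketch, assuming a CUDA 12.x toolkit where these functions ship, is:

```cuda
#include <cstdio>
#include <cooperative_groups.h>

namespace cg = cooperative_groups;

__global__ void count_active(unsigned int* counter) {
    // The active threads of each warp form a coalesced group; invoke_one
    // elects a single one of them to issue the atomic on the group's behalf.
    cg::coalesced_group active = cg::coalesced_threads();
    cg::invoke_one(active, [&] { atomicAdd(counter, active.size()); });
}

int main() {
    unsigned int* counter = nullptr;
    cudaMallocManaged(&counter, sizeof(unsigned int));
    *counter = 0;
    count_active<<<4, 128>>>(counter);
    cudaDeviceSynchronize();
    printf("threads counted: %u\n", *counter);  // 4 blocks * 128 threads = 512
    cudaFree(counter);
    return 0;
}
```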
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/memory.h ADDED
@@ -0,0 +1,135 @@
1
+ /* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMORY_H_
50
+ # define _COOPERATIVE_GROUPS_MEMORY_H_
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ #if defined(_CG_CPP11_FEATURES)
57
+ namespace details {
58
+ _CG_STATIC_CONST_DECL int scratch_num_reserved_bytes = 12;
59
+
60
+ #if defined(_CG_HAS_RESERVED_SHARED)
61
+ _CG_STATIC_QUALIFIER void* reserved_shared_ptr()
62
+ {
63
+ void *ptr;
64
+ asm ("{\n\t"
65
+ " .reg .u32 start;\n\t"
66
+ " .reg .u64 extended;\n\t"
67
+ " mov.u32 start, %%reserved_smem_offset_1;\n\t"
68
+ " cvt.u64.u32 extended, start;\n\t"
69
+ " cvta.shared.u64 %0, extended;\n\t"
70
+ "}"
71
+ : "=" _CG_ASM_PTR_CONSTRAINT(ptr));
72
+ return ptr;
73
+ }
74
+ #endif
75
+
76
+ struct multi_warp_scratch {
77
+ // One barrier per possible size of the group.
78
+ _CG_STATIC_CONST_DECL unsigned int memory_barriers_count = 5;
79
+ _CG_STATIC_CONST_DECL size_t sync_memory_size = memory_barriers_count * sizeof(barrier_t);
80
+
81
+ using communication_type = unsigned long long;
82
+ _CG_STATIC_CONST_DECL size_t communication_size = sizeof(communication_type);
83
+
84
+ // Layout of the scratch space:
85
+ barrier_t barriers[memory_barriers_count];
86
+ char reserved[scratch_num_reserved_bytes]; // Reserve 12 bytes for future use
87
+ communication_type communication_memory[default_max_block_size / 32];
88
+
89
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int scratch_size_needed(unsigned int max_block_size) {
90
+ // One slot of collectives memory per warp.
91
+ return scratch_num_reserved_bytes + sync_memory_size + max_block_size / 32 * communication_size;
92
+ }
93
+
94
+ _CG_QUALIFIER void init_barriers(unsigned int thread_rank) {
95
+ if (thread_rank < memory_barriers_count) {
96
+ barriers[thread_rank] = 0;
97
+ }
98
+ }
99
+ };
100
+
101
+ #if defined(_CG_HAS_RESERVED_SHARED)
102
+ // CG can expect at least 288 bytes available in reserved shared
103
+ static_assert(sizeof(multi_warp_scratch) <= 288, "multi-warp scratch size is too large");
104
+ #endif
105
+
106
+ // Make sure the structure can fit into the user provided memory
107
+ static_assert(sizeof(multi_warp_scratch) <= multi_warp_scratch::scratch_size_needed(default_max_block_size),
108
+ "multi-warp scratch size is too large");
109
+
110
+
111
+ _CG_QUALIFIER multi_warp_scratch* get_scratch_ptr(void* user_scratch) {
112
+ void *ptr;
113
+ #if defined(_CG_HAS_RESERVED_SHARED)
114
+ ptr = reserved_shared_ptr();
115
+ #else
116
+ ptr = user_scratch;
117
+ #endif
118
+ return static_cast<multi_warp_scratch*>(ptr);
119
+
120
+ }
121
+
122
+ }
123
+
124
+ template <unsigned int MaxBlockSize = details::default_max_block_size>
125
+ struct __align__(details::multi_warp_scratch::communication_size) block_tile_memory {
126
+ private:
127
+ #if !defined(_CG_HAS_RESERVED_SHARED)
128
+ char scratch[details::multi_warp_scratch::scratch_size_needed(MaxBlockSize)];
129
+ #endif
130
+ };
131
+ #endif
132
+
133
+ _CG_END_NAMESPACE
134
+
135
+ #endif /* !_COOPERATIVE_GROUPS_MEMORY_H_ */
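Note (editorial, not part of the upstream diff): `block_tile_memory` above either maps onto the reserved shared memory window (sm_80 and newer) or carries the scratch bytes itself. A minimal usage sketch for a tile wider than a warp, assuming a CUDA 12.x toolkit, is:

```cuda
#include <cstdio>
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>

namespace cg = cooperative_groups;

__global__ void wide_tile_sum(const int* in, int* out) {
    // Scratch for tiles wider than a warp; on sm_80+ the reserved-shared
    // path above makes this object effectively empty.
    __shared__ cg::block_tile_memory<256> scratch;
    cg::thread_block block = cg::this_thread_block(scratch);
    auto tile = cg::tiled_partition<64>(block);

    int v = in[block.thread_rank()];
    int sum = cg::reduce(tile, v, cg::plus<int>());
    if (tile.thread_rank() == 0) {
        out[tile.meta_group_rank()] = sum;   // one result per 64-thread tile
    }
}

int main() {
    int *in = nullptr, *out = nullptr;
    cudaMallocManaged(&in, 256 * sizeof(int));
    cudaMallocManaged(&out, 4 * sizeof(int));
    for (int i = 0; i < 256; ++i) in[i] = 1;
    wide_tile_sum<<<1, 256>>>(in, out);
    cudaDeviceSynchronize();
    for (int t = 0; t < 4; ++t) printf("tile %d sum = %d\n", t, out[t]);  // 64 each
    cudaFree(in); cudaFree(out);
    return 0;
}
```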
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/partitioning.h ADDED
@@ -0,0 +1,133 @@
1
+ /*
2
+ * Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CG_PARTITIONING_H
51
+ #define _CG_PARTITIONING_H
52
+
53
+ #include "info.h"
54
+ #include "helpers.h"
55
+
56
+ _CG_BEGIN_NAMESPACE
57
+
58
+ namespace details {
59
+
60
+ template <typename TyGroup>
61
+ _CG_STATIC_QUALIFIER coalesced_group _binary_partition(const TyGroup &tile, bool pred) {
62
+ const unsigned int fullMask = ~0u;
63
+
64
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
65
+ unsigned int predMask = pred ? 0 : fullMask;
66
+ unsigned int setMask = __ballot_sync(thisMask, pred);
67
+
68
+ if (setMask == thisMask || setMask == 0) {
69
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(thisMask);
70
+ _coalesced_group_data_access::modify_meta_group(subTile, 0, 1);
71
+ return subTile;
72
+ }
73
+ else {
74
+ unsigned int subMask = thisMask & (setMask ^ predMask);
75
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
76
+ _coalesced_group_data_access::modify_meta_group(subTile, pred, 2);
77
+ return subTile;
78
+ }
79
+ }
80
+
81
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
82
+ template <typename TyGroup, typename TyPredicate>
83
+ _CG_STATIC_QUALIFIER coalesced_group _labeled_partition(const TyGroup &tile, TyPredicate pred) {
84
+ unsigned int thisMask = _coalesced_group_data_access::get_mask(tile);
85
+ unsigned int thisBias = __ffs(thisMask) - 1; // Subtract 1 to index properly from [1-32]
86
+ unsigned int subMask = __match_any_sync(thisMask, pred);
87
+
88
+ coalesced_group subTile = _coalesced_group_data_access::construct_from_mask<coalesced_group>(subMask);
89
+
90
+ int leaderLaneId = subTile.shfl(details::laneid(), 0);
91
+
92
+ bool isLeader = !subTile.thread_rank();
93
+ unsigned int leaderMask = __ballot_sync(thisMask, isLeader);
94
+ unsigned int tileRank = __fns(leaderMask, leaderLaneId, 0) - thisBias;
95
+
96
+ _coalesced_group_data_access::modify_meta_group(subTile, tileRank, __popc(leaderMask));
97
+
98
+ return subTile;
99
+ }
100
+ #endif
101
+ }; // namespace details
102
+
103
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const coalesced_group &tile, bool pred) {
104
+ return details::_binary_partition(tile, pred);
105
+ }
106
+
107
+ template <unsigned int Size, typename ParentT>
108
+ _CG_STATIC_QUALIFIER coalesced_group binary_partition(const thread_block_tile<Size, ParentT> &tile, bool pred) {
109
+ #ifdef _CG_CPP11_FEATURES
110
+ static_assert(Size <= 32, "Binary partition is available only for tiles of size smaller or equal to 32");
111
+ #endif
112
+ return details::_binary_partition(tile, pred);
113
+ }
114
+
115
+
116
+ #if defined(_CG_HAS_MATCH_COLLECTIVE) && defined(_CG_CPP11_FEATURES)
117
+ template <typename TyPredicate>
118
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const coalesced_group &tile, TyPredicate pred) {
119
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value, "labeled_partition predicate must be an integral type");
120
+ return details::_labeled_partition(tile, pred);
121
+ }
122
+
123
+ template <typename TyPredicate, unsigned int Size, typename ParentT>
124
+ _CG_STATIC_QUALIFIER coalesced_group labeled_partition(const thread_block_tile<Size, ParentT> &tile, TyPredicate pred) {
125
+ static_assert(_CG_STL_NAMESPACE::is_integral<TyPredicate>::value, "labeled_partition predicate must be an integral type");
126
+ static_assert(Size <= 32, "Labeled partition is available only for tiles of size smaller or equal to 32");
127
+ return details::_labeled_partition(tile, pred);
128
+ }
129
+ #endif
130
+
131
+ _CG_END_NAMESPACE
132
+
133
+ #endif // _CG_PARTITIONING_H
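Note (editorial, not part of the upstream diff): a minimal usage sketch of the `binary_partition` entry point defined above is shown below, assuming a CUDA toolkit that ships these headers; threads that evaluate the predicate to the same value end up in the same `coalesced_group`.

```cuda
#include <cstdio>
#include <cooperative_groups.h>

namespace cg = cooperative_groups;

__global__ void partition_demo(int* even_count, int* odd_count) {
    auto warp = cg::tiled_partition<32>(cg::this_thread_block());

    // Threads with the same predicate value land in the same
    // coalesced_group returned by binary_partition.
    bool is_even = (warp.thread_rank() % 2) == 0;
    cg::coalesced_group side = cg::binary_partition(warp, is_even);

    // One thread per partition records how many threads it received.
    if (side.thread_rank() == 0) {
        atomicAdd(is_even ? even_count : odd_count, (int)side.size());
    }
}

int main() {
    int *even = nullptr, *odd = nullptr;
    cudaMallocManaged(&even, sizeof(int));
    cudaMallocManaged(&odd, sizeof(int));
    *even = *odd = 0;
    partition_demo<<<1, 128>>>(even, odd);
    cudaDeviceSynchronize();
    printf("even: %d, odd: %d\n", *even, *odd);  // 64 and 64
    cudaFree(even); cudaFree(odd);
    return 0;
}
```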
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/reduce.h ADDED
@@ -0,0 +1,429 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_REDUCE_H_
50
+ #define _CG_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "coalesced_reduce.h"
55
+ #include "functional.h"
56
+ #include "cooperative_groups.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <class Ty>
63
+ using _redux_is_add_supported = _CG_STL_NAMESPACE::integral_constant<
64
+ bool,
65
+ _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) <= 4)>;
66
+
67
+ template <class Ty>
68
+ using redux_is_add_supported = _redux_is_add_supported<Ty>;
69
+
70
+ // A specialization for 64 bit logical operations is possible
71
+ // but for now only accelerate 32 bit bitwise ops
72
+ template <class Ty>
73
+ using redux_is_logical_supported = redux_is_add_supported<Ty>;
74
+
75
+ // Base operator support case
76
+ template <class TyOp, class Ty> struct _redux_op_supported : public _CG_STL_NAMESPACE::false_type {};
77
+ #ifdef _CG_HAS_OP_REDUX
78
+ template <class Ty> struct _redux_op_supported<cooperative_groups::plus<Ty>, Ty> : public redux_is_add_supported<Ty> {};
79
+ template <class Ty> struct _redux_op_supported<cooperative_groups::less<Ty>, Ty> : public redux_is_add_supported<Ty> {};
80
+ template <class Ty> struct _redux_op_supported<cooperative_groups::greater<Ty>, Ty> : public redux_is_add_supported<Ty> {};
81
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_and<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
82
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_or<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
83
+ template <class Ty> struct _redux_op_supported<cooperative_groups::bit_xor<Ty>, Ty> : public redux_is_logical_supported<Ty> {};
84
+ #endif
85
+
86
+ template <class Ty, template <class> class TyOp>
87
+ using redux_op_supported = _redux_op_supported<
88
+ typename details::remove_qual<TyOp<Ty>>,
89
+ Ty>;
90
+
91
+ // Groups smaller than 16 actually have worse performance characteristics when used with redux
92
+ // tiles of size 16 and 32 perform the same or better and have better code generation profiles
93
+ template <class TyGroup> struct _redux_group_optimized : public _CG_STL_NAMESPACE::false_type {};
94
+
95
+ template <unsigned int Sz, typename TyPar>
96
+ struct _redux_group_optimized<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
97
+ bool,
98
+ (Sz >= 16)> {};
99
+ template <unsigned int Sz, typename TyPar>
100
+ struct _redux_group_optimized<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::integral_constant<
101
+ bool,
102
+ (Sz >= 16)> {};
103
+ template <>
104
+ struct _redux_group_optimized<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
105
+
106
+ template <typename TyGroup>
107
+ using redux_group_optimized = _redux_group_optimized<details::remove_qual<TyGroup>>;
108
+
109
+ template <template <class> class TyOp>
110
+ _CG_STATIC_QUALIFIER int pick_redux(int mask, int val);
111
+ template <template <class> class TyOp>
112
+ _CG_STATIC_QUALIFIER unsigned int pick_redux(int mask, unsigned int val);
113
+
114
+ #ifdef _CG_HAS_OP_REDUX
115
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::plus>(int mask, int val) {
116
+ return __reduce_add_sync(mask, val);
117
+ }
118
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::less>(int mask, int val) {
119
+ return __reduce_min_sync(mask, val);
120
+ }
121
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::greater>(int mask, int val) {
122
+ return __reduce_max_sync(mask, val);
123
+ }
124
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_and>(int mask, int val) {
125
+ return __reduce_and_sync(mask, val);
126
+ }
127
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_xor>(int mask, int val) {
128
+ return __reduce_xor_sync(mask, val);
129
+ }
130
+ template <> _CG_QUALIFIER int pick_redux<cooperative_groups::bit_or>(int mask, int val) {
131
+ return __reduce_or_sync(mask, val);
132
+ }
133
+
134
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::plus>(int mask, unsigned int val) {
135
+ return __reduce_add_sync(mask, val);
136
+ }
137
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::less>(int mask, unsigned int val) {
138
+ return __reduce_min_sync(mask, val);
139
+ }
140
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::greater>(int mask, unsigned int val) {
141
+ return __reduce_max_sync(mask, val);
142
+ }
143
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_and>(int mask, unsigned int val) {
144
+ return __reduce_and_sync(mask, val);
145
+ }
146
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_xor>(int mask, unsigned int val) {
147
+ return __reduce_xor_sync(mask, val);
148
+ }
149
+ template <> _CG_QUALIFIER unsigned int pick_redux<cooperative_groups::bit_or>(int mask, unsigned int val) {
150
+ return __reduce_or_sync(mask, val);
151
+ }
152
+ #endif
153
+
154
+
155
+ template <typename TyVal, bool = _CG_STL_NAMESPACE::is_unsigned<TyVal>::value>
156
+ struct _accelerated_op;
157
+
158
+ // Signed type redux intrinsic dispatch
159
+ template <typename TyVal>
160
+ struct _accelerated_op<TyVal, false> {
161
+ template <template <class> class TyOp>
162
+ _CG_STATIC_QUALIFIER TyVal redux(int mask, TyVal val) {
163
+ return static_cast<TyVal>(pick_redux<TyOp>(mask, static_cast<int>(val)));
164
+ }
165
+ };
166
+
167
+ // Unsigned type redux intrinsic dispatch
168
+ template <typename TyVal>
169
+ struct _accelerated_op<TyVal, true> {
170
+ template <template <class> class TyOp>
171
+ _CG_STATIC_QUALIFIER TyVal redux(int mask, TyVal val) {
172
+ return static_cast<TyVal>(pick_redux<TyOp>(mask, static_cast<unsigned int>(val)));
173
+ }
174
+ };
175
+
176
+ template <typename TyVal>
177
+ using accelerated_op = _accelerated_op<TyVal>;
178
+
179
+
180
+ template <typename TyVal, typename TyFnInput, typename TyGroup>
181
+ class _redux_dispatch {
182
+ template <class Ty, template <class> class TyOp>
183
+ using _redux_is_usable = _CG_STL_NAMESPACE::integral_constant<bool,
184
+ redux_op_supported<Ty, TyOp>::value &&
185
+ redux_group_optimized<TyGroup>::value>;
186
+
187
+ template <class Ty, template <class> class TyOp>
188
+ using redux_is_usable = typename _CG_STL_NAMESPACE::enable_if<_redux_is_usable<Ty, TyOp>::value, void>::type*;
189
+
190
+ template <class Ty, template <class> class TyOp>
191
+ using redux_is_not_usable = typename _CG_STL_NAMESPACE::enable_if<!_redux_is_usable<Ty, TyOp>::value, void>::type*;
192
+
193
+ public:
194
+ // Dispatch to redux if the combination of op and args are supported
195
+ template<
196
+ template <class> class TyOp,
197
+ redux_is_usable<TyFnInput, TyOp> = nullptr>
198
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
199
+ // Retrieve the mask for the group and dispatch to redux
200
+ return accelerated_op<TyFnInput>::template redux<TyOp>(_coalesced_group_data_access::get_mask(group), _CG_STL_NAMESPACE::forward<TyVal>(val));
201
+ }
202
+
203
+ template<
204
+ template <class> class TyOp,
205
+ redux_is_usable<TyFnInput, TyOp> = nullptr>
206
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>& op) -> decltype(op(val, val)) {
207
+ // Retrieve the mask for the group and dispatch to redux
208
+ return accelerated_op<TyFnInput>::template redux<TyOp>(_coalesced_group_data_access::get_mask(group), _CG_STL_NAMESPACE::forward<TyVal>(val));
209
+ }
210
+
211
+ // Fallback shuffle sync reduction
212
+ template <
213
+ template <class> class TyOp,
214
+ redux_is_not_usable<TyFnInput, TyOp> = nullptr>
215
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
216
+ //Dispatch to fallback shuffle sync accelerated reduction
217
+ return coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
218
+ }
219
+
220
+ };
221
+
222
+ // Group support for reduce.
223
+ template <class TyGroup> struct _reduce_group_supported : public _CG_STL_NAMESPACE::false_type {};
224
+
225
+ template <unsigned int Sz, typename TyPar>
226
+ struct _reduce_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
227
+ template <unsigned int Sz, typename TyPar>
228
+ struct _reduce_group_supported<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
229
+ template <>
230
+ struct _reduce_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
231
+
232
+ template <typename TyGroup>
233
+ using reduce_group_supported = _reduce_group_supported<details::remove_qual<TyGroup>>;
234
+
235
+ template <typename TyVal, typename TyFnInput, template <class> class TyOp, typename TyGroup>
236
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>&& op) -> decltype(op(val, val)) {
237
+ static_assert(details::is_op_type_same<TyFnInput, TyVal>::value, "Operator and argument types differ");
238
+
239
+ using dispatch = details::_redux_dispatch<TyVal, TyFnInput, TyGroup>;
240
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
241
+ }
242
+
243
+ template <typename TyVal, typename TyFnInput, template <class> class TyOp, typename TyGroup>
244
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp<TyFnInput>& op) -> decltype(op(val, val)) {
245
+ static_assert(details::is_op_type_same<TyFnInput, TyVal>::value, "Operator and argument types differ");
246
+
247
+ using dispatch = details::_redux_dispatch<TyVal, TyFnInput, TyGroup>;
248
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp<TyFnInput>>(op));
249
+ }
250
+
251
+
252
+ template <typename TyVal, typename TyOp, typename TyGroup>
253
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
254
+ return details::coalesced_reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
255
+ }
256
+
257
+ template <unsigned int GroupId>
258
+ struct tile_reduce_dispatch;
259
+
260
+ template <>
261
+ struct tile_reduce_dispatch<details::coalesced_group_id> {
262
+ template <typename TyGroup, typename TyVal, typename TyFn>
263
+ _CG_STATIC_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
264
+ return details::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
265
+ }
266
+ };
267
+
268
+ #if defined(_CG_CPP11_FEATURES)
269
+ template <>
270
+ struct tile_reduce_dispatch<details::multi_tile_group_id> {
271
+ template <unsigned int Size, typename ParentT, typename TyVal, typename TyFn>
272
+ _CG_STATIC_QUALIFIER auto reduce(const thread_block_tile<Size, ParentT>& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
273
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
274
+ using TyRet = details::remove_qual<TyVal>;
275
+ const unsigned int num_warps = Size / 32;
276
+
277
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
278
+ *warp_scratch_location =
279
+ details::reduce(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
280
+ };
281
+ auto inter_warp_lambda =
282
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
283
+ *thread_scratch_location =
284
+ details::reduce(subwarp, *thread_scratch_location, _CG_STL_NAMESPACE::forward<TyFn>(op));
285
+ };
286
+ return details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
287
+ }
288
+ };
289
+
290
+ template <unsigned int GroupId>
291
+ struct tile_async_reduce_dispatch;
292
+
293
+ template <>
294
+ struct tile_async_reduce_dispatch<details::coalesced_group_id> {
295
+ template <unsigned int TySize, typename ParentT, typename TyDst, typename TyVal, typename TyFn, typename TyResHandler>
296
+ _CG_STATIC_QUALIFIER void reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group, TyDst& dst, TyVal&& val, TyFn&& op, TyResHandler& res_handler) {
297
+ // Do regular, in group reduction
298
+ auto result = details::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
299
+
300
+ // One thread stores/updates the destination
301
+ if (group.thread_rank() == 0) {
302
+ res_handler(result);
303
+ }
304
+ }
305
+ template <typename TyDst, typename TyVal, typename TyFn, typename TyResHandler>
306
+ _CG_STATIC_QUALIFIER void reduce(const coalesced_group& group, TyDst& dst, TyVal&& val, TyFn&& op, TyResHandler& res_handler) {
307
+ // Do in group reduction to the last thread
308
+ auto result = details::coalesced_reduce_to_one(group, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
309
+
310
+ // One thread stores/updates the destination
311
+ if (group.thread_rank() == group.size() - 1) {
312
+ res_handler(result);
313
+ }
314
+ }
315
+ };
316
+
317
+ template <>
318
+ struct tile_async_reduce_dispatch<details::multi_tile_group_id> {
319
+ template <unsigned int TySize, typename ParentT, typename TyDst, typename TyInputVal, typename TyFn, typename TyResHandler>
320
+ _CG_STATIC_QUALIFIER void reduce(const thread_block_tile<TySize, ParentT>& group, TyDst& dst, TyInputVal&& val, TyFn&& op, TyResHandler& res_handler) {
321
+ using TyVal = remove_qual<TyInputVal>;
322
+ const unsigned int num_warps = TySize / 32;
323
+ details::barrier_t* sync_location = multi_warp_sync_location_getter(group);
324
+ auto warp_scratch_location = multi_warp_scratch_location_getter<TyVal>(group, group.thread_rank() / 32);
325
+
326
+ // Do in warp reduce
327
+ auto warp = details::tiled_partition_internal<32, thread_block_tile<TySize, ParentT>>();
328
+ *warp_scratch_location = details::reduce(warp, _CG_STL_NAMESPACE::forward<TyInputVal>(val), op);
329
+
330
+ // Tile of size num_warps from the last warp to arrive does final reduction step
331
+ if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), num_warps)) {
332
+ auto subwarp = details::tiled_partition_internal<num_warps, decltype(warp)>();
333
+ if (subwarp.meta_group_rank() == 0) {
334
+ auto thread_scratch_location = multi_warp_scratch_location_getter<TyVal>(group, subwarp.thread_rank());
335
+ auto thread_val = *thread_scratch_location;
336
+ // Release other warps, we read their contribution already.
337
+ subwarp.sync();
338
+ details::sync_warps_release(sync_location, subwarp.thread_rank() == 0, details::cta::thread_rank(), num_warps);
339
+ TyVal result = details::reduce(subwarp, thread_val, op);
340
+ // One thread stores the result or updates the atomic
341
+ if (subwarp.thread_rank() == 0) {
342
+ res_handler(result);
343
+ }
344
+ }
345
+ warp.sync();
346
+ }
347
+ }
348
+ };
349
+ #endif
350
+
351
+ template <typename TyGroup, typename TyInputVal, typename TyRetVal>
352
+ _CG_QUALIFIER void check_reduce_params() {
353
+ static_assert(details::is_op_type_same<TyInputVal, TyRetVal>::value, "Operator input and output types differ");
354
+ static_assert(details::reduce_group_supported<TyGroup>::value, "This group does not exclusively represent a tile");
355
+ };
356
+
357
+ template <typename TyGroup, typename TyDstVal, typename TyInputVal, typename TyRetVal>
358
+ _CG_QUALIFIER void check_async_reduce_params() {
359
+ check_reduce_params<TyGroup, TyInputVal, TyRetVal>();
360
+ static_assert(details::is_op_type_same<TyDstVal, TyInputVal>::value, "Destination and input types differ");
361
+ }
362
+ } // details
363
+
364
+ template <typename TyGroup, typename TyVal, typename TyFn>
365
+ _CG_QUALIFIER auto reduce(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
366
+ details::check_reduce_params<TyGroup, details::remove_qual<TyVal>, decltype(op(val, val))>();
367
+
368
+ using dispatch = details::tile_reduce_dispatch<TyGroup::_group_id>;
369
+ return dispatch::reduce(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
370
+ }
371
+
372
+ #if defined(_CG_CPP11_FEATURES)
373
+
374
+ # if defined(_CG_HAS_STL_ATOMICS)
375
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
376
+ void _CG_QUALIFIER reduce_update_async(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
377
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
378
+ auto update_lambda = [&] (TyVal& result) {
379
+ details::atomic_update(dst, result, op);
380
+ };
381
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
382
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), update_lambda);
383
+ }
384
+
385
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
386
+ void _CG_QUALIFIER reduce_update_async(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
387
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
388
+ auto update_lambda = [&] (TyVal& result) {
389
+ details::atomic_update(dst, result, op);
390
+ };
391
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
392
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), update_lambda);
393
+ }
394
+
395
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
396
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
397
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
398
+ auto store_lambda = [&] (TyVal& result) {
399
+ details::atomic_store(dst, result);
400
+ };
401
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
402
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
403
+ }
404
+
405
+ template<typename TyGroup, typename TyVal, cuda::thread_scope Sco, typename TyInputVal, typename TyFn>
406
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) {
407
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
408
+ auto store_lambda = [&] (TyVal& result) {
409
+ details::atomic_store(dst, result);
410
+ };
411
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
412
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
413
+ }
414
+ # endif
415
+
416
+ template<typename TyGroup, typename TyVal, typename TyInputVal, typename TyFn>
417
+ void _CG_QUALIFIER reduce_store_async(const TyGroup& group, TyVal* dst, TyInputVal&& val, TyFn&& op) {
418
+ details::check_async_reduce_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
419
+ auto store_lambda = [&] (TyVal& result) {
420
+ *dst = result;
421
+ };
422
+ using dispatch = details::tile_async_reduce_dispatch<TyGroup::_group_id>;
423
+ dispatch::reduce(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op), store_lambda);
424
+ }
425
+ #endif
426
+
427
+ _CG_END_NAMESPACE
428
+
429
+ #endif // _CG_REDUCE_H_
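The tail of details/reduce.h above declares the public cg::reduce entry point and, under _CG_HAS_STL_ATOMICS, the reduce_update_async / reduce_store_async variants that fold a group's result into a cuda::atomic or cuda::atomic_ref. A minimal usage sketch, assuming a CUDA 11+ toolchain whose libcu++ provides <cuda/atomic> and cuda::atomic_ref; the kernel name, buffers, and launch shape are illustrative assumptions, not part of the header:

    #include <cooperative_groups.h>
    #include <cooperative_groups/reduce.h>
    #include <cuda/atomic>

    namespace cg = cooperative_groups;

    // Each 32-thread tile reduces its values, then folds the tile's contribution
    // into a device-scope accumulator via the async update path declared above.
    __global__ void tile_sum(const int* in, int* total, int n) {
        cg::thread_block block = cg::this_thread_block();
        cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);

        int idx = blockIdx.x * blockDim.x + threadIdx.x;
        int v = (idx < n) ? in[idx] : 0;

        // Synchronous reduction: every thread in the tile receives the tile-wide sum.
        int tile_total = cg::reduce(tile, v, cg::plus<int>());

        // Asynchronous variant: one atomic read-modify-write per tile is issued internally.
        cuda::atomic_ref<int, cuda::thread_scope_device> dst(*total);
        cg::reduce_update_async(tile, dst, v, cg::plus<int>());

        (void)tile_total; // unused in this sketch
    }

reduce_store_async follows the same pattern but overwrites the destination instead of combining with it, as the store_lambda above shows.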
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/scan.h ADDED
@@ -0,0 +1,320 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_SCAN_H_
50
+ #define _CG_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "functional.h"
55
+ #include "coalesced_scan.h"
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+
61
+ // Group support for scan.
62
+ template <class TyGroup> struct _scan_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _scan_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
66
+ template <unsigned int Sz, typename TyPar>
67
+ struct _scan_group_supported<internal_thread_block_tile<Sz, TyPar>> : public _CG_STL_NAMESPACE::true_type {};
68
+ template <>
69
+ struct _scan_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
70
+
71
+ template <typename TyGroup>
72
+ using scan_group_supported = _scan_group_supported<details::remove_qual<TyGroup>>;
73
+
74
+ template <bool IsIntegralPlus>
75
+ struct integral_optimized_scan;
76
+
77
+ enum class ScanType { exclusive, inclusive };
78
+
79
+ template <unsigned int GroupId, ScanType TyScan>
80
+ struct scan_dispatch;
81
+
82
+ template <ScanType TyScan>
83
+ struct scan_dispatch<details::coalesced_group_id, TyScan> {
84
+ template <typename TyGroup, typename TyVal, typename TyFn>
85
+ _CG_STATIC_QUALIFIER auto scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
86
+ auto scan_result = coalesced_inclusive_scan(group, val, op);
87
+ if (TyScan == ScanType::exclusive) {
88
+ scan_result = convert_inclusive_to_exclusive(group,
89
+ scan_result,
90
+ _CG_STL_NAMESPACE::forward<TyVal>(val),
91
+ _CG_STL_NAMESPACE::forward<TyFn>(op));
92
+ }
93
+ return scan_result;
94
+ }
95
+ };
96
+
97
+ #if defined(_CG_CPP11_FEATURES)
98
+ template <ScanType TyScan>
99
+ struct scan_dispatch<details::multi_tile_group_id, TyScan> {
100
+ template <unsigned int Size, typename ParentT, typename TyVal, typename TyFn>
101
+ _CG_STATIC_QUALIFIER auto scan(const thread_block_tile<Size, ParentT>& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
102
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
103
+ using TyRet = details::remove_qual<TyVal>;
104
+ const unsigned int num_warps = Size / 32;
105
+ // In warp scan result, calculated in warp_lambda
106
+ TyRet warp_scan;
107
+
108
+ // In warp scan, put sum in the warp_scratch_location
109
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
110
+ warp_scan =
111
+ details::coalesced_inclusive_scan(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
112
+ if (warp.thread_rank() + 1 == warp.size()) {
113
+ *warp_scratch_location = warp_scan;
114
+ }
115
+ if (TyScan == ScanType::exclusive) {
116
+ warp_scan = warp.shfl_up(warp_scan, 1);
117
+ }
118
+ };
119
+
120
+ // Tile of size num_warps performs the final scan step (an exclusive scan of the warp sums); the other threads then add it
121
+ // to their in-warp scan results
122
+ auto inter_warp_lambda =
123
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
124
+ auto thread_val = *thread_scratch_location;
125
+ auto result = coalesced_inclusive_scan(subwarp, thread_val, op);
126
+ *thread_scratch_location = convert_inclusive_to_exclusive(subwarp, result, thread_val, op);
127
+ };
128
+
129
+ TyRet previous_warps_sum = details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
130
+ if (TyScan == ScanType::exclusive && warpType::thread_rank() == 0) {
131
+ return previous_warps_sum;
132
+ }
133
+ if (warpType::meta_group_rank() == 0) {
134
+ return warp_scan;
135
+ }
136
+ else {
137
+ return op(warp_scan, previous_warps_sum);
138
+ }
139
+ }
140
+ };
141
+
142
+ #if defined(_CG_HAS_STL_ATOMICS)
143
+ template <unsigned int GroupId, ScanType TyScan>
144
+ struct scan_update_dispatch;
145
+
146
+ template <ScanType TyScan>
147
+ struct scan_update_dispatch<details::coalesced_group_id, TyScan> {
148
+ template <typename TyGroup, typename TyAtomic, typename TyVal, typename TyFn>
149
+ _CG_STATIC_QUALIFIER auto scan(const TyGroup& group, TyAtomic& dst, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
150
+ details::remove_qual<TyVal> old;
151
+
152
+ // Do regular in group scan
153
+ auto scan_result = details::coalesced_inclusive_scan(group, val, op);
154
+
155
+ // Last thread updates the atomic and distributes its old value to other threads
156
+ if (group.thread_rank() == group.size() - 1) {
157
+ old = atomic_update(dst, scan_result, _CG_STL_NAMESPACE::forward<TyFn>(op));
158
+ }
159
+ old = group.shfl(old, group.size() - 1);
160
+ if (TyScan == ScanType::exclusive) {
161
+ scan_result = convert_inclusive_to_exclusive(group, scan_result, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
162
+ }
163
+ scan_result = op(old, scan_result);
164
+ return scan_result;
165
+ }
166
+ };
167
+
168
+ template <ScanType TyScan>
169
+ struct scan_update_dispatch<details::multi_tile_group_id, TyScan> {
170
+ template <unsigned int Size, typename ParentT, typename TyAtomic, typename TyVal, typename TyFn>
171
+ _CG_STATIC_QUALIFIER auto scan(const thread_block_tile<Size, ParentT>& group, TyAtomic& dst, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
172
+ using warpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
173
+ using TyRet = details::remove_qual<TyVal>;
174
+ const unsigned int num_warps = Size / 32;
175
+ // In warp scan result, calculated in warp_lambda
176
+ TyRet warp_scan;
177
+
178
+ // In warp scan, put sum in the warp_scratch_location
179
+ auto warp_lambda = [&] (const warpType& warp, TyRet* warp_scratch_location) {
180
+ warp_scan =
181
+ details::coalesced_inclusive_scan(warp, _CG_STL_NAMESPACE::forward<TyVal>(val), op);
182
+ if (warp.thread_rank() + 1 == warp.size()) {
183
+ *warp_scratch_location = warp_scan;
184
+ }
185
+ if (TyScan == ScanType::exclusive) {
186
+ warp_scan = warp.shfl_up(warp_scan, 1);
187
+ }
188
+ };
189
+
190
+ // Tile of size num_warps performs the final scan step (an exclusive scan of the warp sums); the other threads then add it
191
+ // to their in-warp scan results
192
+ auto inter_warp_lambda =
193
+ [&] (const details::internal_thread_block_tile<num_warps, warpType>& subwarp, TyRet* thread_scratch_location) {
194
+ auto thread_val = *thread_scratch_location;
195
+ auto scan_result = details::coalesced_inclusive_scan(subwarp, thread_val, op);
196
+ TyRet offset;
197
+ // Single thread does the atomic update with sum of all contributions and reads the old value.
198
+ if (subwarp.thread_rank() == subwarp.size() - 1) {
199
+ offset = details::atomic_update(dst, scan_result, op);
200
+ }
201
+ offset = subwarp.shfl(offset, subwarp.size() - 1);
202
+ scan_result = convert_inclusive_to_exclusive(subwarp, scan_result, thread_val, op);
203
+ // Add offset read from the atomic to the scanned warp sum.
204
+ // Skipping the first thread, since it got a default-constructed value from the conversion;
205
+ // it should just return the offset received from the thread that did the atomic update.
206
+ if (subwarp.thread_rank() != 0) {
207
+ offset = op(scan_result, offset);
208
+ }
209
+ *thread_scratch_location = offset;
210
+ };
211
+
212
+ TyRet previous_warps_sum = details::multi_warp_collectives_helper<TyRet>(group, warp_lambda, inter_warp_lambda);
213
+ if (TyScan == ScanType::exclusive && warpType::thread_rank() == 0) {
214
+ return previous_warps_sum;
215
+ }
216
+ return op(warp_scan, previous_warps_sum);
217
+ }
218
+ };
219
+ #endif
220
+ #endif
221
+
222
+ template <typename TyGroup, typename TyInputVal, typename TyRetVal>
223
+ _CG_QUALIFIER void check_scan_params() {
224
+ static_assert(details::is_op_type_same<TyInputVal, TyRetVal>::value, "Operator input and output types differ");
225
+ static_assert(details::scan_group_supported<TyGroup>::value, "This group does not exclusively represent a tile");
226
+ }
227
+
228
+ #if defined(_CG_HAS_STL_ATOMICS)
229
+ template <typename TyGroup, typename TyDstVal, typename TyInputVal, typename TyRetVal>
230
+ _CG_QUALIFIER void check_scan_update_params() {
231
+ check_scan_params<TyGroup, TyInputVal, TyRetVal>();
232
+ static_assert(details::is_op_type_same<TyDstVal, TyInputVal>::value, "Destination and input types differ");
233
+ }
234
+ #endif
235
+
236
+ } // details
237
+
238
+ template <typename TyGroup, typename TyVal, typename TyFn>
239
+ _CG_QUALIFIER auto inclusive_scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
240
+ details::check_scan_params<TyGroup, TyVal, decltype(op(val, val))>();
241
+
242
+ using dispatch = details::scan_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
243
+ return dispatch::scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
244
+ }
245
+
246
+ template <typename TyGroup, typename TyVal>
247
+ _CG_QUALIFIER details::remove_qual<TyVal> inclusive_scan(const TyGroup& group, TyVal&& val) {
248
+ return inclusive_scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), cooperative_groups::plus<details::remove_qual<TyVal>>());
249
+ }
250
+
251
+ template <typename TyGroup, typename TyVal, typename TyFn>
252
+ _CG_QUALIFIER auto exclusive_scan(const TyGroup& group, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
253
+ details::check_scan_params<TyGroup, TyVal, decltype(op(val, val))>();
254
+
255
+ using dispatch = details::scan_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
256
+ return dispatch::scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
257
+ }
258
+
259
+ template <typename TyGroup, typename TyVal>
260
+ _CG_QUALIFIER details::remove_qual<TyVal> exclusive_scan(const TyGroup& group, TyVal&& val) {
261
+ return exclusive_scan(group, _CG_STL_NAMESPACE::forward<TyVal>(val), cooperative_groups::plus<details::remove_qual<TyVal>>());
262
+ }
263
+
264
+ #if defined(_CG_HAS_STL_ATOMICS)
265
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
266
+ _CG_QUALIFIER auto inclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
267
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
268
+
269
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
270
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
271
+ }
272
+
273
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
274
+ _CG_QUALIFIER TyVal inclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco> & dst, TyInputVal&& val) {
275
+ return inclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
276
+ }
277
+
278
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
279
+ _CG_QUALIFIER auto exclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
280
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
281
+
282
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
283
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
284
+ }
285
+
286
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
287
+ _CG_QUALIFIER TyVal exclusive_scan_update(const TyGroup& group, cuda::atomic<TyVal, Sco>& dst, TyInputVal&& val) {
288
+ return exclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
289
+ }
290
+
291
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
292
+ _CG_QUALIFIER auto inclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
293
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
294
+
295
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::inclusive>;
296
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
297
+ }
298
+
299
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
300
+ _CG_QUALIFIER TyVal inclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco> & dst, TyInputVal&& val) {
301
+ return inclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
302
+ }
303
+
304
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco, typename TyFn>
305
+ _CG_QUALIFIER auto exclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val, TyFn&& op) -> decltype(op(val, val)) {
306
+ details::check_scan_update_params<TyGroup, TyVal, details::remove_qual<TyInputVal>, decltype(op(val, val))>();
307
+
308
+ using dispatch = details::scan_update_dispatch<TyGroup::_group_id, details::ScanType::exclusive>;
309
+ return dispatch::scan(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), _CG_STL_NAMESPACE::forward<TyFn>(op));
310
+ }
311
+
312
+ template<typename TyGroup, typename TyVal, typename TyInputVal, cuda::thread_scope Sco>
313
+ _CG_QUALIFIER TyVal exclusive_scan_update(const TyGroup& group, const cuda::atomic_ref<TyVal, Sco>& dst, TyInputVal&& val) {
314
+ return exclusive_scan_update(group, dst, _CG_STL_NAMESPACE::forward<TyInputVal>(val), cooperative_groups::plus<TyVal>());
315
+ }
316
+ #endif
317
+
318
+ _CG_END_NAMESPACE
319
+
320
+ #endif // _CG_SCAN_H_
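details/scan.h above defines the public inclusive_scan / exclusive_scan entry points (defaulting the operator to cooperative_groups::plus) and, under _CG_HAS_STL_ATOMICS, the *_scan_update variants that additionally fold an atomically fetched offset into the result. A minimal sketch of the non-atomic entry points, assuming the same CUDA 11+ setup as above; kernel and buffer names are illustrative:

    #include <cooperative_groups.h>
    #include <cooperative_groups/scan.h>

    namespace cg = cooperative_groups;

    // Each 32-thread tile computes prefix sums over its elements.
    __global__ void tile_prefix_sums(const int* in, int* incl, int* excl, int n) {
        cg::thread_block block = cg::this_thread_block();
        cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);

        int idx = blockIdx.x * blockDim.x + threadIdx.x;
        int v = (idx < n) ? in[idx] : 0;

        // Two-argument overload defaults the operator to cg::plus<int>().
        int inclusive = cg::inclusive_scan(tile, v);
        // Note: per the conversion helper used above, thread_rank 0 of the tile gets a
        // default-constructed value from exclusive_scan, so don't rely on it.
        int exclusive = cg::exclusive_scan(tile, v, cg::plus<int>());

        if (idx < n) {
            incl[idx] = inclusive;
            excl[idx] = exclusive;
        }
    }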
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/sync.h ADDED
@@ -0,0 +1,267 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_GRID_H
50
+ #define _CG_GRID_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details
57
+ {
58
+
59
+ typedef unsigned int barrier_t;
60
+
61
+ _CG_STATIC_QUALIFIER bool bar_has_flipped(unsigned int old_arrive, unsigned int current_arrive) {
62
+ return (((old_arrive ^ current_arrive) & 0x80000000) != 0);
63
+ }
64
+
65
+ _CG_STATIC_QUALIFIER void sync_grids(unsigned int expected, volatile barrier_t *arrived) {
66
+ bool cta_master = (threadIdx.x + threadIdx.y + threadIdx.z == 0);
67
+ bool gpu_master = (blockIdx.x + blockIdx.y + blockIdx.z == 0);
68
+
69
+ __barrier_sync(0);
70
+
71
+ if (cta_master) {
72
+ unsigned int oldArrive;
73
+ unsigned int nb = 1;
74
+ if (gpu_master) {
75
+ nb = 0x80000000 - (expected - 1);
76
+ }
77
+
78
+ #if __CUDA_ARCH__ < 700
79
+ // Fence; barrier update; volatile polling; fence
80
+ __threadfence();
81
+
82
+ oldArrive = atomicAdd((unsigned int*)arrived, nb);
83
+
84
+ while (!bar_has_flipped(oldArrive, *arrived));
85
+
86
+ __threadfence();
87
+ #else
88
+ // Barrier update with release; polling with acquire
89
+ asm volatile("atom.add.release.gpu.u32 %0,[%1],%2;" : "=r"(oldArrive) : _CG_ASM_PTR_CONSTRAINT((unsigned int*)arrived), "r"(nb) : "memory");
90
+
91
+ unsigned int current_arrive;
92
+ do {
93
+ asm volatile("ld.acquire.gpu.u32 %0,[%1];" : "=r"(current_arrive) : _CG_ASM_PTR_CONSTRAINT((unsigned int *)arrived) : "memory");
94
+ } while (!bar_has_flipped(oldArrive, current_arrive));
95
+ #endif
96
+ }
97
+
98
+ __barrier_sync(0);
99
+ }
100
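sync_grids above is the grid-wide rendezvous: each block's master thread atomically bumps a shared counter (with release semantics on sm_70+, or fences plus volatile polling on older architectures) and spins until the counter's top bit flips. On the public side this machinery is normally reached through grid_group::sync() from a cooperative launch; a hedged sketch of that usage, assuming the kernel is launched with cudaLaunchCooperativeKernel on an architecture that supports it (kernel and buffers are illustrative):

    #include <cooperative_groups.h>

    namespace cg = cooperative_groups;

    // Two-phase kernel: every block publishes a partial result, the whole grid
    // synchronizes, then block 0 consumes all partials.
    __global__ void two_phase(float* partials, float* out) {
        cg::grid_group grid = cg::this_grid();

        if (threadIdx.x == 0) {
            partials[blockIdx.x] = static_cast<float>(blockIdx.x);  // phase 1
        }

        grid.sync();  // grid-wide barrier

        if (blockIdx.x == 0 && threadIdx.x == 0) {                  // phase 2
            float sum = 0.f;
            for (unsigned b = 0; b < gridDim.x; ++b) sum += partials[b];
            *out = sum;
        }
    }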
+
101
+ /* - Multi warp groups synchronization routines - */
102
+
103
+ // Need both acquire and release for the last warp, since it won't be able to acquire with red.and
104
+ _CG_STATIC_QUALIFIER unsigned int atom_or_acq_rel_cta(unsigned int *addr, unsigned int val) {
105
+ unsigned int old;
106
+ #if __CUDA_ARCH__ < 700
107
+ __threadfence_block();
108
+ old = atomicOr(addr, val);
109
+ #else
110
+ asm volatile("atom.or.acq_rel.cta.b32 %0,[%1],%2;" : "=r"(old) : _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
111
+ #endif
112
+ return old;
113
+ }
114
+
115
+ // Special case where the barrier is arrived at, but not waited on
116
+ _CG_STATIC_QUALIFIER void red_or_release_cta(unsigned int *addr, unsigned int val) {
117
+ #if __CUDA_ARCH__ < 700
118
+ __threadfence_block();
119
+ atomicOr(addr, val);
120
+ #else
121
+ asm volatile("red.or.release.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
122
+ #endif
123
+ }
124
+
125
+ // Usually called by the last arriving warp to release the other warps; can be relaxed, since the or was already acq_rel
126
+ _CG_STATIC_QUALIFIER void red_and_relaxed_cta(unsigned int *addr, unsigned int val) {
127
+ #if __CUDA_ARCH__ < 700
128
+ atomicAnd(addr, val);
129
+ #else
130
+ asm volatile("red.and.relaxed.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
131
+ #endif
132
+ }
133
+
134
+ // Special case of release, where the last warp was doing extra work before releasing the others; needs to be a release
135
+ // to ensure that extra work is visible
136
+ _CG_STATIC_QUALIFIER void red_and_release_cta(unsigned int *addr, unsigned int val) {
137
+ #if __CUDA_ARCH__ < 700
138
+ __threadfence_block();
139
+ atomicAnd(addr, val);
140
+ #else
141
+ asm volatile("red.and.release.cta.b32 [%0],%1;" :: _CG_ASM_PTR_CONSTRAINT(addr), "r"(val) : "memory");
142
+ #endif
143
+ }
144
+
145
+ // Read the barrier, acquire to ensure all memory operations following the sync are correctly performed after it is released
146
+ _CG_STATIC_QUALIFIER unsigned int ld_acquire_cta(unsigned int *addr) {
147
+ unsigned int val;
148
+ #if __CUDA_ARCH__ < 700
149
+ val = *((volatile unsigned int*) addr);
150
+ __threadfence_block();
151
+ #else
152
+ asm volatile("ld.acquire.cta.u32 %0,[%1];" : "=r"(val) : _CG_ASM_PTR_CONSTRAINT(addr) : "memory");
153
+ #endif
154
+ return val;
155
+ }
156
+
157
+ // Get the synchronization bit mask of my thread_block_tile of size num_warps. Thread ranks 0..31 have the first bit assigned to them,
158
+ // thread ranks 32..63 the second, etc.
159
+ // Bit masks are unique for each group; groups of the same size have the same number of bits set, but at different positions.
160
+ _CG_STATIC_QUALIFIER unsigned int get_group_mask(unsigned int thread_rank, unsigned int num_warps) {
161
+ return num_warps == 32 ? ~0 : ((1 << num_warps) - 1) << (num_warps * (thread_rank / (num_warps * 32)));
162
+ }
163
+
164
+ _CG_STATIC_QUALIFIER void barrier_wait(barrier_t *arrived, unsigned int warp_bit) {
165
+ while(ld_acquire_cta(arrived) & warp_bit);
166
+ }
167
+
168
+ // Default blocking sync.
169
+ _CG_STATIC_QUALIFIER void sync_warps(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
170
+ unsigned int warp_id = thread_rank / 32;
171
+ bool warp_master = (thread_rank % 32 == 0);
172
+ unsigned int warp_bit = 1 << warp_id;
173
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
174
+
175
+ __syncwarp(0xFFFFFFFF);
176
+
177
+ if (warp_master) {
178
+ unsigned int old = atom_or_acq_rel_cta(arrived, warp_bit);
179
+ if (((old | warp_bit) & group_mask) == group_mask) {
180
+ red_and_relaxed_cta(arrived, ~group_mask);
181
+ }
182
+ else {
183
+ barrier_wait(arrived, warp_bit);
184
+ }
185
+ }
186
+
187
+ __syncwarp(0xFFFFFFFF);
188
+ }
189
+
190
+ // Blocking sync, except that the last arriving warp, which releases the other warps, returns to do other work first.
191
+ // Warp returning true from this function needs to call sync_warps_release.
192
+ _CG_STATIC_QUALIFIER bool sync_warps_last_releases(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
193
+ unsigned int warp_id = thread_rank / 32;
194
+ bool warp_master = (thread_rank % 32 == 0);
195
+ unsigned int warp_bit = 1 << warp_id;
196
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
197
+
198
+ __syncwarp(0xFFFFFFFF);
199
+
200
+ unsigned int old = 0;
201
+ if (warp_master) {
202
+ old = atom_or_acq_rel_cta(arrived, warp_bit);
203
+ }
204
+ old = __shfl_sync(0xFFFFFFFF, old, 0);
205
+ if (((old | warp_bit) & group_mask) == group_mask) {
206
+ return true;
207
+ }
208
+ barrier_wait(arrived, warp_bit);
209
+
210
+ return false;
211
+ }
212
+
213
+ // Release my group from the barrier.
214
+ _CG_STATIC_QUALIFIER void sync_warps_release(barrier_t *arrived, bool is_master, unsigned int thread_rank, unsigned int num_warps) {
215
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
216
+ if (is_master) {
217
+ red_and_release_cta(arrived, ~group_mask);
218
+ }
219
+ }
220
+
221
+ // Arrive at my group barrier, but don't block or release the barrier, even if every warp has arrived.
222
+ // sync_warps_release needs to be called by some warp after this one to reset the barrier.
223
+ _CG_STATIC_QUALIFIER void sync_warps_arrive(barrier_t *arrived, unsigned int thread_rank, unsigned int num_warps) {
224
+ unsigned int warp_id = thread_rank / 32;
225
+ bool warp_master = (thread_rank % 32 == 0);
226
+ unsigned int warp_bit = 1 << warp_id;
227
+ unsigned int group_mask = get_group_mask(thread_rank, num_warps);
228
+
229
+ __syncwarp(0xFFFFFFFF);
230
+
231
+ if (warp_master) {
232
+ red_or_release_cta(arrived, warp_bit);
233
+ }
234
+ }
235
+
236
+ // Wait for my warp to be released from the barrier. Warp must have arrived first.
237
+ _CG_STATIC_QUALIFIER void sync_warps_wait(barrier_t *arrived, unsigned int thread_rank) {
238
+ unsigned int warp_id = thread_rank / 32;
239
+ unsigned int warp_bit = 1 << warp_id;
240
+
241
+ barrier_wait(arrived, warp_bit);
242
+ }
243
+
244
+ // Wait for specific warp to arrive at the barrier
245
+ _CG_QUALIFIER void sync_warps_wait_for_specific_warp(barrier_t *arrived, unsigned int wait_warp_id) {
246
+ unsigned int wait_mask = 1 << wait_warp_id;
247
+ while((ld_acquire_cta(arrived) & wait_mask) != wait_mask);
248
+ }
249
+
250
+ // Reset (clear) the bit corresponding to my warp in the barrier
251
+ _CG_QUALIFIER void sync_warps_reset(barrier_t *arrived, unsigned int thread_rank) {
252
+ unsigned int warp_id = thread_rank / 32;
253
+ unsigned int warp_bit = 1 << warp_id;
254
+
255
+ __syncwarp(0xFFFFFFFF);
256
+
257
+ if (thread_rank % 32 == 0) {
258
+ red_and_release_cta(arrived, ~warp_bit);
259
+ }
260
+ // No need to sync after the atomic, there will be a sync of the group that is being partitioned right after this.
261
+ }
262
+
263
+ } // details
264
+
265
+ _CG_END_NAMESPACE
266
+
267
+ #endif // _CG_GRID_H
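The multi-warp routines above coordinate the warps of a tile through a single 32-bit barrier word: get_group_mask assigns every group of num_warps warps a contiguous bit field, warps arrive by OR-ing their bit in, and the last arrival clears the field to release the group. A standalone host-side sketch of just the mask arithmetic (it mirrors the device function above; the test values are illustrative):

    #include <cassert>
    #include <cstdio>

    // Mirrors details::get_group_mask from sync.h: each group of num_warps warps
    // owns a contiguous run of num_warps bits inside the 32-bit barrier word.
    unsigned int get_group_mask(unsigned int thread_rank, unsigned int num_warps) {
        return num_warps == 32 ? ~0u
                               : ((1u << num_warps) - 1) << (num_warps * (thread_rank / (num_warps * 32)));
    }

    int main() {
        assert(get_group_mask(0, 4)    == 0x0000000Fu);  // threads 0..127   -> bits 0..3
        assert(get_group_mask(130, 4)  == 0x000000F0u);  // threads 128..255 -> bits 4..7
        assert(get_group_mask(999, 32) == 0xFFFFFFFFu);  // a 32-warp group owns the whole word
        std::puts("group masks ok");
        return 0;
    }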
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h ADDED
@@ -0,0 +1,62 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_MEMCPY_ASYNC
50
+ #define _COOPERATIVE_GROUPS_MEMCPY_ASYNC
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/async.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+ #endif // _COOPERATIVE_GROUPS_MEMCPY_ASYNC
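memcpy_async.h above is only a thin wrapper: it pulls in details/async.h when C++11 support is detected and errors out otherwise. For context, a sketch of how the cooperative_groups::memcpy_async / cooperative_groups::wait pair exposed through this header is typically used, assuming a block of 256 threads; names and sizes are illustrative:

    #include <cooperative_groups.h>
    #include <cooperative_groups/memcpy_async.h>

    namespace cg = cooperative_groups;

    // Stage one block-sized chunk of global memory into shared memory
    // cooperatively, wait for the copy, then operate on the staged data.
    __global__ void staged_scale(const float* __restrict__ src, float* __restrict__ dst, int n) {
        __shared__ float stage[256];
        cg::thread_block block = cg::this_thread_block();

        int base  = blockIdx.x * 256;
        int count = min(256, n - base);
        if (count <= 0) return;  // uniform per block, so the collectives below stay safe

        // Every thread of the block participates in issuing the asynchronous copy.
        cg::memcpy_async(block, stage, src + base, sizeof(float) * count);
        cg::wait(block);  // completes outstanding copies and synchronizes the block

        int t = threadIdx.x;
        if (t < count) dst[base + t] = stage[t] * 2.0f;
    }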
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/reduce.h ADDED
@@ -0,0 +1,63 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_REDUCE_H
50
+ #define _COOPERATIVE_GROUPS_REDUCE_H
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/reduce.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+
63
+ #endif //_COOPERATIVE_GROUPS_REDUCE_H
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/scan.h ADDED
@@ -0,0 +1,63 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_SCAN_H
50
+ #define _COOPERATIVE_GROUPS_SCAN_H
51
+
52
+ #include "../cooperative_groups.h"
53
+ #include "details/info.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ # include "details/scan.h"
57
+ #else
58
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
59
+ -std=c++11 compiler option.
60
+ #endif
61
+
62
+
63
+ #endif //_COOPERATIVE_GROUPS_SCAN_H
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGL.h ADDED
@@ -0,0 +1,659 @@
1
+ /*
2
+ * Copyright 2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAEGL_H
51
+ #define CUDAEGL_H
52
+
53
+ #include "cuda.h"
54
+ #include "EGL/egl.h"
55
+ #include "EGL/eglext.h"
56
+
57
+
58
+ #ifdef CUDA_FORCE_API_VERSION
59
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
60
+ #endif
61
+
62
+ #ifdef __cplusplus
63
+ extern "C" {
64
+ #endif
65
+
66
+ /**
67
+ * \addtogroup CUDA_TYPES
68
+ * @{
69
+ */
70
+
71
+ /**
72
+ * Maximum number of planes per frame
73
+ */
74
+ #define MAX_PLANES 3
75
+
76
+ /**
77
+ * CUDA EglFrame type - array or pointer
78
+ */
79
+ typedef enum CUeglFrameType_enum {
80
+ CU_EGL_FRAME_TYPE_ARRAY = 0, /**< Frame type CUDA array */
81
+ CU_EGL_FRAME_TYPE_PITCH = 1, /**< Frame type pointer */
82
+ } CUeglFrameType;
83
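CUeglFrameType above records whether an imported EGL frame is backed by CUDA arrays or by pitched device pointers, and MAX_PLANES bounds how many per-plane handles such a frame can carry (the CUeglFrame struct that holds them appears further down in this header). A trivial illustrative helper based only on the enum above; it is an assumption for exposition, not part of the CUDA API:

    /* Hypothetical helper: describe how the planes of an imported frame are addressed. */
    static const char* egl_frame_backing(CUeglFrameType t) {
        switch (t) {
        case CU_EGL_FRAME_TYPE_ARRAY: return "planes are CUarray handles";
        case CU_EGL_FRAME_TYPE_PITCH: return "planes are pitched device pointers";
        default:                      return "unknown frame type";
        }
    }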
+
84
+ /**
85
+ * Indicates that timeout for ::cuEGLStreamConsumerAcquireFrame is infinite.
86
+ */
87
+ #define CUDA_EGL_INFINITE_TIMEOUT 0xFFFFFFFF
88
+
89
+ /**
90
+ * Resource location flags- sysmem or vidmem
91
+ *
92
+ * For CUDA context on iGPU, since video and system memory are equivalent -
93
+ * these flags will not have an effect on the execution.
94
+ *
95
+ * For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags
96
+ * to give a hint about the desired location.
97
+ *
98
+ * ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory
99
+ * to be accessed by CUDA.
100
+ *
101
+ * ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated
102
+ * video memory to be accessed by CUDA.
103
+ *
104
+ * There may be an additional latency due to new allocation and data migration,
105
+ * if the frame is produced on a different memory.
106
+
107
+ */
108
+ typedef enum CUeglResourceLocationFlags_enum {
109
+ CU_EGL_RESOURCE_LOCATION_SYSMEM = 0x00, /**< Resource location sysmem */
110
+ CU_EGL_RESOURCE_LOCATION_VIDMEM = 0x01 /**< Resource location vidmem */
111
+ } CUeglResourceLocationFlags;
112
+
113
+ /**
114
+ * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
115
+ * Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY
116
+ */
117
+ typedef enum CUeglColorFormat_enum {
118
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0x00, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
119
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR = 0x01, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */
120
+ CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 0x02, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
121
+ CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 0x03, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */
122
+ CU_EGL_COLOR_FORMAT_RGB = 0x04, /**< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. */
123
+ CU_EGL_COLOR_FORMAT_BGR = 0x05, /**< R/G/B three channels in one surface with RGB byte ordering. Only pitch linear format supported. */
124
+ CU_EGL_COLOR_FORMAT_ARGB = 0x06, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */
125
+ CU_EGL_COLOR_FORMAT_RGBA = 0x07, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */
126
+ CU_EGL_COLOR_FORMAT_L = 0x08, /**< single luminance channel in one surface. */
127
+ CU_EGL_COLOR_FORMAT_R = 0x09, /**< single color channel in one surface. */
128
+ CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 0x0A, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
129
+ CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 0x0B, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */
130
+ CU_EGL_COLOR_FORMAT_YUYV_422 = 0x0C, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */
131
+ CU_EGL_COLOR_FORMAT_UYVY_422 = 0x0D, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */
132
+ CU_EGL_COLOR_FORMAT_ABGR = 0x0E, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */
133
+ CU_EGL_COLOR_FORMAT_BGRA = 0x0F, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */
134
+ CU_EGL_COLOR_FORMAT_A = 0x10, /**< Alpha color format - one channel in one surface. */
135
+ CU_EGL_COLOR_FORMAT_RG = 0x11, /**< R/G color format - two channels in one surface with GR byte ordering */
136
+ CU_EGL_COLOR_FORMAT_AYUV = 0x12, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */
137
+ CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 0x13, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
138
+ CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 0x14, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
139
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 0x15, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
140
+ CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 0x16, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
141
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 0x17, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
142
+ CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 0x18, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
143
+ CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 0x19, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
144
+ CU_EGL_COLOR_FORMAT_VYUY_ER = 0x1A, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */
145
+ CU_EGL_COLOR_FORMAT_UYVY_ER = 0x1B, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */
146
+ CU_EGL_COLOR_FORMAT_YUYV_ER = 0x1C, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */
147
+ CU_EGL_COLOR_FORMAT_YVYU_ER = 0x1D, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */
148
+ CU_EGL_COLOR_FORMAT_YUV_ER = 0x1E, /**< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */
149
+ CU_EGL_COLOR_FORMAT_YUVA_ER = 0x1F, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */
150
+ CU_EGL_COLOR_FORMAT_AYUV_ER = 0x20, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */
151
+ CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 0x21, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */
152
+ CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 0x22, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
153
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 0x23, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
154
+ CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 0x24, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */
155
+ CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 0x25, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
156
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 0x26, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
157
+ CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 0x27, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */
158
+ CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 0x28, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
159
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 0x29, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
160
+ CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 0x2A, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
161
+ CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 0x2B, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
162
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 0x2C, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
163
+ CU_EGL_COLOR_FORMAT_BAYER_RGGB = 0x2D, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */
164
+ CU_EGL_COLOR_FORMAT_BAYER_BGGR = 0x2E, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */
165
+ CU_EGL_COLOR_FORMAT_BAYER_GRBG = 0x2F, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */
166
+ CU_EGL_COLOR_FORMAT_BAYER_GBRG = 0x30, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */
167
+ CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 0x31, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
168
+ CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 0x32, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
169
+ CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 0x33, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
170
+ CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 0x34, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
171
+ CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 0x35, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
172
+ CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 0x36, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
173
+ CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 0x37, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
174
+ CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 0x38, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
175
+ CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 0x39, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
176
+ CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 0x3A, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
177
+ CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 0x3B, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
178
+ CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 0x3C, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
179
+ CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 0x3D, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
180
+ CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 0x3E, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
181
+ CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 0x3F, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
182
+ CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 0x40, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
183
+ CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 0x41, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
184
+ CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 0x42, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
185
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 0x43, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
186
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 0x44, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */
187
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 0x45, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */
188
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 0x46, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */
189
+ CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 0x47, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */
190
+ CU_EGL_COLOR_FORMAT_BAYER_BCCR = 0x48, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */
191
+ CU_EGL_COLOR_FORMAT_BAYER_RCCB = 0x49, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */
192
+ CU_EGL_COLOR_FORMAT_BAYER_CRBC = 0x4A, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */
193
+ CU_EGL_COLOR_FORMAT_BAYER_CBRC = 0x4B, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */
194
+ CU_EGL_COLOR_FORMAT_BAYER10_CCCC = 0x4C, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
195
+ CU_EGL_COLOR_FORMAT_BAYER12_BCCR = 0x4D, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
196
+ CU_EGL_COLOR_FORMAT_BAYER12_RCCB = 0x4E, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
197
+ CU_EGL_COLOR_FORMAT_BAYER12_CRBC = 0x4F, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
198
+ CU_EGL_COLOR_FORMAT_BAYER12_CBRC = 0x50, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
199
+ CU_EGL_COLOR_FORMAT_BAYER12_CCCC = 0x51, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
200
+ CU_EGL_COLOR_FORMAT_Y = 0x52, /**< Color format for single Y plane. */
201
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 = 0x53, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
202
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 = 0x54, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
203
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 = 0x55, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. */
204
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 = 0x56, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
206
+ CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 = 0x57, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
207
+ CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 = 0x58, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
208
+ CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 = 0x59, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
210
+ CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 = 0x5A, /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
211
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 = 0x5B, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
212
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 = 0x5C, /**< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
213
+ CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 = 0x5D, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
214
+ CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR = 0x5E, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
215
+ CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 = 0x5F, /**< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
216
+ CU_EGL_COLOR_FORMAT_Y_ER = 0x60, /**< Extended Range Color format for single Y plane. */
217
+ CU_EGL_COLOR_FORMAT_Y_709_ER = 0x61, /**< Extended Range Color format for single Y plane. */
218
+ CU_EGL_COLOR_FORMAT_Y10_ER = 0x62, /**< Extended Range Color format for single Y10 plane. */
219
+ CU_EGL_COLOR_FORMAT_Y10_709_ER = 0x63, /**< Extended Range Color format for single Y10 plane. */
220
+ CU_EGL_COLOR_FORMAT_Y12_ER = 0x64, /**< Extended Range Color format for single Y12 plane. */
221
+ CU_EGL_COLOR_FORMAT_Y12_709_ER = 0x65, /**< Extended Range Color format for single Y12 plane. */
222
+ CU_EGL_COLOR_FORMAT_YUVA = 0x66, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */
223
+ CU_EGL_COLOR_FORMAT_YUV = 0x67, /**< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. */
224
+ CU_EGL_COLOR_FORMAT_YVYU = 0x68, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */
225
+ CU_EGL_COLOR_FORMAT_VYUY = 0x69, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */
226
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER = 0x6A, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
227
+ CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER = 0x6B, /**< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
228
+ CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER = 0x6C, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
229
+ CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER = 0x6D, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
230
+ CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER = 0x6E, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
231
+ CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER = 0x6F, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
232
+ CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER = 0x70, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
233
+ CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER = 0x71, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
234
+ CU_EGL_COLOR_FORMAT_MAX
235
+ } CUeglColorFormat;
236
+
237
+ /**
238
+ * CUDA EGLFrame structure Descriptor - structure defining one frame of EGL.
239
+ *
240
+ * Each frame may contain one or more planes depending on whether the surface is multiplanar or not.
241
+ */
242
+ typedef struct CUeglFrame_st {
243
+ union {
244
+ CUarray pArray[MAX_PLANES]; /**< Array of CUarray corresponding to each plane*/
245
+ void* pPitch[MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/
246
+ } frame;
247
+ unsigned int width; /**< Width of first plane */
248
+ unsigned int height; /**< Height of first plane */
249
+ unsigned int depth; /**< Depth of first plane */
250
+ unsigned int pitch; /**< Pitch of first plane */
251
+ unsigned int planeCount; /**< Number of planes */
252
+ unsigned int numChannels; /**< Number of channels for the plane */
253
+ CUeglFrameType frameType; /**< Array or Pitch */
254
+ CUeglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/
255
+ CUarray_format cuFormat; /**< CUDA Array Format*/
256
+ } CUeglFrame_v1;
257
+ typedef CUeglFrame_v1 CUeglFrame;
258
+
259
+ /**
260
+ * CUDA EGLStream Connection
261
+ */
262
+ typedef struct CUeglStreamConnection_st* CUeglStreamConnection;
263
+
264
+ /** @} */ /* END CUDA_TYPES */
265
+
266
+ /**
267
+ * \file cudaEGL.h
268
+ * \brief Header file for the EGL interoperability functions of the
269
+ * low-level CUDA driver application programming interface.
270
+ */
271
+
272
+ /**
273
+ * \defgroup CUDA_EGL EGL Interoperability
274
+ * \ingroup CUDA_DRIVER
275
+ *
276
+ * ___MANBRIEF___ EGL interoperability functions of the low-level CUDA
277
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
278
+ *
279
+ * This section describes the EGL interoperability functions of the
280
+ * low-level CUDA driver application programming interface.
281
+ *
282
+ * @{
283
+ */
284
+
285
+ /**
286
+ * \brief Registers an EGL image
287
+ *
288
+ * Registers the EGLImageKHR specified by \p image for access by
289
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
290
+ * Additional Mapping/Unmapping is not required for the registered resource and
291
+ * ::cuGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource.
292
+ *
293
+ * The application will be responsible for synchronizing access to shared objects.
294
+ * The application must ensure that any pending operations which access the objects have completed
295
+ * before passing control to CUDA. This may be accomplished by issuing and waiting for
296
+ * a glFinish command on all GL contexts (for OpenGL, and likewise for other APIs).
297
+ * The application will also be responsible for ensuring that any pending operation on the
298
+ * registered CUDA resource has completed prior to executing subsequent commands in other APIs
299
+ * accessing the same memory objects.
300
+ * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).
301
+ *
302
+ * The surface's intended usage is specified using \p flags, as follows:
303
+ *
304
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
305
+ * resource will be used. It is therefore assumed that this resource will be
306
+ * read from and written to by CUDA. This is the default value.
307
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
308
+ * will not write to this resource.
309
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
310
+ * CUDA will not read from this resource and will write over the
311
+ * entire contents of the resource, so none of the data previously
312
+ * stored in the resource will be preserved.
313
+ *
314
+ * The EGLImageKHR is an object which can be used to create an EGLImage target resource. It is defined as a void pointer:
315
+ * typedef void* EGLImageKHR
316
+ *
317
+ * \param pCudaResource - Pointer to the returned object handle
318
+ * \param image - An EGLImageKHR image which can be used to create target resource.
319
+ * \param flags - Map flags
320
+ *
321
+ * \return
322
+ * ::CUDA_SUCCESS,
323
+ * ::CUDA_ERROR_INVALID_HANDLE,
324
+ * ::CUDA_ERROR_ALREADY_MAPPED,
325
+ * ::CUDA_ERROR_INVALID_CONTEXT,
326
+ *
327
+ * \sa ::cuGraphicsEGLRegisterImage, ::cuGraphicsUnregisterResource,
328
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
329
+ * ::cuGraphicsUnmapResources,
330
+ * ::cudaGraphicsEGLRegisterImage
331
+ */
332
+ CUresult CUDAAPI cuGraphicsEGLRegisterImage(CUgraphicsResource *pCudaResource, EGLImageKHR image, unsigned int flags);
333
+
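+ /*
+ * Illustrative usage sketch (not part of the original header): registers an EGLImage
+ * with CUDA, reads back the mapped frame description, and cleans up. The EGLImageKHR
+ * argument is assumed to be created by the application's EGL code; error handling is
+ * reduced to early returns.
+ *
+ * \code
+ * static void inspectEglImage(EGLImageKHR eglImage)
+ * {
+ *     CUgraphicsResource res = NULL;
+ *     if (cuGraphicsEGLRegisterImage(&res, eglImage, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE) != CUDA_SUCCESS)
+ *         return;
+ *     CUeglFrame frame;
+ *     if (cuGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0) == CUDA_SUCCESS) {
+ *         // frame.frame.pArray[0] or frame.frame.pPitch[0] now addresses plane 0
+ *     }
+ *     cuGraphicsUnregisterResource(res);
+ * }
+ * \endcode
+ */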
334
+ /**
335
+ * \brief Connect CUDA to EGLStream as a consumer.
336
+ *
337
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream.
338
+ *
339
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
340
+ * API to another.
341
+ *
342
+ * \param conn - Pointer to the returned connection handle
343
+ * \param stream - EGLStreamKHR handle
344
+ *
345
+ * \return
346
+ * ::CUDA_SUCCESS,
347
+ * ::CUDA_ERROR_INVALID_HANDLE,
348
+ * ::CUDA_ERROR_INVALID_CONTEXT,
349
+ *
350
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
351
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
352
+ * ::cudaEGLStreamConsumerConnect
353
+ */
354
+ CUresult CUDAAPI cuEGLStreamConsumerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream);
355
+
356
+ /**
357
+ * \brief Connect CUDA to EGLStream as a consumer with given flags.
358
+ *
359
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags.
360
+ *
361
+ * The flags specify whether the consumer wants to access frames from system memory or video memory.
362
+ * Default is ::CU_EGL_RESOURCE_LOCATION_VIDMEM.
363
+ *
364
+ * \param conn - Pointer to the returned connection handle
365
+ * \param stream - EGLStreamKHR handle
366
+ * \param flags - Flags denote intended location - system or video.
367
+ *
368
+ * \return
369
+ * ::CUDA_SUCCESS,
370
+ * ::CUDA_ERROR_INVALID_HANDLE,
371
+ * ::CUDA_ERROR_INVALID_CONTEXT,
372
+ *
373
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
374
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
375
+ * ::cudaEGLStreamConsumerConnectWithFlags
376
+ */
377
+
378
+ CUresult CUDAAPI cuEGLStreamConsumerConnectWithFlags(CUeglStreamConnection *conn, EGLStreamKHR stream, unsigned int flags);
379
+
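+ /*
+ * Illustrative sketch (not part of the original header): connects as a consumer
+ * requesting frames in system memory. The EGLStreamKHR handle `eglStream` is assumed
+ * to be created by the application; the location enumerator is assumed to be the one
+ * declared by CUeglResourceLocationFlags in this header.
+ *
+ * \code
+ * CUeglStreamConnection conn;
+ * if (cuEGLStreamConsumerConnectWithFlags(&conn, eglStream,
+ *                                         CU_EGL_RESOURCE_LOCATION_SYSMEM) == CUDA_SUCCESS) {
+ *     // acquire and release frames here, then:
+ *     cuEGLStreamConsumerDisconnect(&conn);
+ * }
+ * \endcode
+ */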
380
+ /**
381
+ * \brief Disconnect CUDA as a consumer from an EGLStream.
382
+ *
383
+ * Disconnect CUDA as a consumer from the EGLStreamKHR.
384
+ *
385
+ * \param conn - Connection to disconnect.
386
+ *
387
+ * \return
388
+ * ::CUDA_SUCCESS,
389
+ * ::CUDA_ERROR_INVALID_HANDLE,
390
+ * ::CUDA_ERROR_INVALID_CONTEXT,
391
+ *
392
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
393
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
394
+ * ::cudaEGLStreamConsumerDisconnect
395
+ */
396
+ CUresult CUDAAPI cuEGLStreamConsumerDisconnect(CUeglStreamConnection *conn);
397
+
398
+ /**
399
+ * \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
400
+ *
401
+ * Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented
402
+ * by the producer unless explicitly disabled by setting EGL_SUPPORT_REUSE_NV flag to EGL_FALSE
403
+ * during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE.
404
+ * ::cuGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get
405
+ * ::CUeglFrame.
406
+ *
407
+ * \param conn - Connection on which to acquire
408
+ * \param pCudaResource - CUDA resource on which the stream frame will be mapped for use.
409
+ * \param pStream - CUDA stream for synchronization and any data migrations
410
+ * implied by ::CUeglResourceLocationFlags.
411
+ * \param timeout - Desired timeout in usec for a new frame to be acquired.
412
+ * If set as ::CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely.
413
+ * After the timeout occurs, the CUDA consumer tries to acquire an old frame
414
+ * if available and EGL_SUPPORT_REUSE_NV flag is set.
415
+ *
416
+ * \return
417
+ * ::CUDA_SUCCESS,
418
+ * ::CUDA_ERROR_INVALID_HANDLE,
419
+ * ::CUDA_ERROR_LAUNCH_TIMEOUT,
420
+ *
421
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
422
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
423
+ * ::cudaEGLStreamConsumerAcquireFrame
424
+ */
425
+ CUresult CUDAAPI cuEGLStreamConsumerAcquireFrame(CUeglStreamConnection *conn,
426
+ CUgraphicsResource *pCudaResource, CUstream *pStream, unsigned int timeout);
427
+ /**
428
+ * \brief Releases the last frame acquired from the EGLStream.
429
+ *
430
+ * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
431
+ * If the EGL_SUPPORT_REUSE_NV flag is set to EGL_TRUE at the time of EGLStream creation,
432
+ * this API doesn't release the last frame acquired on the EGLStream.
433
+ * By default, EGLStream is created with this flag set to EGL_TRUE.
434
+ *
435
+ * \param conn - Connection on which to release
436
+ * \param pCudaResource - CUDA resource whose corresponding frame is to be released
437
+ * \param pStream - CUDA stream on which release will be done.
438
+ *
439
+ * \return
440
+ * ::CUDA_SUCCESS,
441
+ * ::CUDA_ERROR_INVALID_HANDLE,
442
+ *
443
+ * \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect,
444
+ * ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame,
445
+ * ::cudaEGLStreamConsumerReleaseFrame
446
+ */
447
+ CUresult CUDAAPI cuEGLStreamConsumerReleaseFrame(CUeglStreamConnection *conn,
448
+ CUgraphicsResource pCudaResource, CUstream *pStream);
449
+
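+ /*
+ * Illustrative consumer loop (not part of the original header): acquires one frame,
+ * maps it, and returns it to the stream. `conn` is assumed to come from a prior
+ * cuEGLStreamConsumerConnect call, and the 16000 usec timeout is an arbitrary choice.
+ *
+ * \code
+ * static void consumeOneFrame(CUeglStreamConnection *conn)
+ * {
+ *     CUgraphicsResource res = NULL;
+ *     CUstream stream = 0;   // default stream; a dedicated stream also works
+ *     if (cuEGLStreamConsumerAcquireFrame(conn, &res, &stream, 16000) != CUDA_SUCCESS)
+ *         return;
+ *     CUeglFrame frame;
+ *     if (cuGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0) == CUDA_SUCCESS) {
+ *         // process the planes on `stream` here
+ *     }
+ *     cuEGLStreamConsumerReleaseFrame(conn, res, &stream);
+ * }
+ * \endcode
+ */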
450
+ /**
451
+ * \brief Connect CUDA to EGLStream as a producer.
452
+ *
453
+ * Connect CUDA as a producer to EGLStreamKHR specified by \p stream.
454
+ *
455
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
456
+ * API to another.
457
+ *
458
+ * \param conn - Pointer to the returned connection handle
459
+ * \param stream - EGLStreamKHR handle
460
+ * \param width - width of the image to be submitted to the stream
461
+ * \param height - height of the image to be submitted to the stream
462
+ *
463
+ * \return
464
+ * ::CUDA_SUCCESS,
465
+ * ::CUDA_ERROR_INVALID_HANDLE,
466
+ * ::CUDA_ERROR_INVALID_CONTEXT,
467
+ *
468
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
469
+ * ::cuEGLStreamProducerPresentFrame,
470
+ * ::cudaEGLStreamProducerConnect
471
+ */
472
+ CUresult CUDAAPI cuEGLStreamProducerConnect(CUeglStreamConnection *conn, EGLStreamKHR stream,
473
+ EGLint width, EGLint height);
474
+
475
+ /**
476
+ * \brief Disconnect CUDA as a producer from an EGLStream.
477
+ *
478
+ * Disconnect CUDA as a producer from the EGLStreamKHR.
479
+ *
480
+ * \param conn - Connection to disconnect.
481
+ *
482
+ * \return
483
+ * ::CUDA_SUCCESS,
484
+ * ::CUDA_ERROR_INVALID_HANDLE,
485
+ * ::CUDA_ERROR_INVALID_CONTEXT,
486
+ *
487
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
488
+ * ::cuEGLStreamProducerPresentFrame,
489
+ * ::cudaEGLStreamProducerDisconnect
490
+ */
491
+ CUresult CUDAAPI cuEGLStreamProducerDisconnect(CUeglStreamConnection *conn);
492
+
493
+ /**
494
+ * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
495
+ *
496
+ * When a frame is presented by the producer, it gets associated with the EGLStream
497
+ * and thus it is illegal to free the frame before the producer is disconnected.
498
+ * If a frame is freed and reused it may lead to undefined behavior.
499
+ *
500
+ * If producer and consumer are on different GPUs (iGPU and dGPU) then frametype
501
+ * ::CU_EGL_FRAME_TYPE_ARRAY is not supported. ::CU_EGL_FRAME_TYPE_PITCH can be used for
502
+ * such cross-device applications.
503
+ *
504
+ * The ::CUeglFrame is defined as:
505
+ * \code
506
+ * typedef struct CUeglFrame_st {
507
+ * union {
508
+ * CUarray pArray[MAX_PLANES];
509
+ * void* pPitch[MAX_PLANES];
510
+ * } frame;
511
+ * unsigned int width;
512
+ * unsigned int height;
513
+ * unsigned int depth;
514
+ * unsigned int pitch;
515
+ * unsigned int planeCount;
516
+ * unsigned int numChannels;
517
+ * CUeglFrameType frameType;
518
+ * CUeglColorFormat eglColorFormat;
519
+ * CUarray_format cuFormat;
520
+ * } CUeglFrame;
521
+ * \endcode
522
+ *
523
+ * For a ::CUeglFrame of type ::CU_EGL_FRAME_TYPE_PITCH, the application may present a sub-region of a memory
524
+ * allocation. In that case, the pitched pointer will specify the start address of the sub-region in
525
+ * the allocation and corresponding ::CUeglFrame fields will specify the dimensions of the sub-region.
526
+ *
527
+ * \param conn - Connection on which to present the CUDA array
528
+ * \param eglframe - CUDA EGLStream producer frame handle to be sent to the consumer over the EGLStream.
529
+ * \param pStream - CUDA stream on which to present the frame.
530
+ *
531
+ * \return
532
+ * ::CUDA_SUCCESS,
533
+ * ::CUDA_ERROR_INVALID_HANDLE,
534
+ *
535
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
536
+ * ::cuEGLStreamProducerReturnFrame,
537
+ * ::cudaEGLStreamProducerPresentFrame
538
+ */
539
+ CUresult CUDAAPI cuEGLStreamProducerPresentFrame(CUeglStreamConnection *conn,
540
+ CUeglFrame eglframe, CUstream *pStream);
541
+
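+ /*
+ * Illustrative producer sketch (not part of the original header): fills in a
+ * single-plane pitch-linear CUeglFrame and presents it. `conn` is assumed to be an
+ * established producer connection, and `devPtr`/`pitchBytes`/`w`/`h` are assumed to
+ * describe a device allocation owned by the application (e.g. from cuMemAllocPitch).
+ *
+ * \code
+ * static void presentPitchFrame(CUeglStreamConnection *conn, void *devPtr,
+ *                               unsigned int pitchBytes, unsigned int w, unsigned int h)
+ * {
+ *     CUstream stream = 0;
+ *     CUeglFrame frame = {0};
+ *     frame.frame.pPitch[0] = devPtr;
+ *     frame.width          = w;
+ *     frame.height         = h;
+ *     frame.depth          = 1;
+ *     frame.pitch          = pitchBytes;
+ *     frame.planeCount     = 1;
+ *     frame.numChannels    = 4;
+ *     frame.frameType      = CU_EGL_FRAME_TYPE_PITCH;
+ *     frame.eglColorFormat = CU_EGL_COLOR_FORMAT_ARGB;
+ *     frame.cuFormat       = CU_AD_FORMAT_UNSIGNED_INT8;
+ *     cuEGLStreamProducerPresentFrame(conn, frame, &stream);
+ * }
+ * \endcode
+ */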
542
+ /**
543
+ * \brief Return the CUDA eglFrame to the EGLStream released by the consumer.
544
+ *
545
+ * This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not
546
+ * returned a frame to the EGL stream. If the timeout is returned, the application can retry.
547
+ *
548
+ * \param conn - Connection on which to return
549
+ * \param eglframe - CUDA EGLStream producer frame handle returned from the consumer over the EGLStream.
550
+ * \param pStream - CUDA stream on which to return the frame.
551
+ *
552
+ * \return
553
+ * ::CUDA_SUCCESS,
554
+ * ::CUDA_ERROR_INVALID_HANDLE,
555
+ * ::CUDA_ERROR_LAUNCH_TIMEOUT
556
+ *
557
+ * \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect,
558
+ * ::cuEGLStreamProducerPresentFrame,
559
+ * ::cudaEGLStreamProducerReturnFrame
560
+ */
561
+ CUresult CUDAAPI cuEGLStreamProducerReturnFrame(CUeglStreamConnection *conn,
562
+ CUeglFrame *eglframe, CUstream *pStream);
563
+
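+ /*
+ * Illustrative sketch (not part of the original header): recycles a frame the
+ * consumer has released so its memory can be refilled and presented again. `conn` is
+ * assumed to be an established producer connection; a simple retry loop handles the
+ * documented CUDA_ERROR_LAUNCH_TIMEOUT case.
+ *
+ * \code
+ * CUeglFrame returned;
+ * CUstream stream = 0;
+ * CUresult st;
+ * do {
+ *     st = cuEGLStreamProducerReturnFrame(&conn, &returned, &stream);
+ * } while (st == CUDA_ERROR_LAUNCH_TIMEOUT);
+ * if (st == CUDA_SUCCESS) {
+ *     // refill returned.frame.pPitch[0] (or pArray[0]) and present it again
+ * }
+ * \endcode
+ */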
564
+ /**
565
+ * \brief Get an eglFrame through which to access a registered EGL graphics resource.
566
+ *
567
+ * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
568
+ * \p resource may be accessed.
569
+ * This API can only be called for registered EGL graphics resources.
570
+ *
571
+ * The ::CUeglFrame is defined as:
572
+ * \code
573
+ * typedef struct CUeglFrame_st {
574
+ * union {
575
+ * CUarray pArray[MAX_PLANES];
576
+ * void* pPitch[MAX_PLANES];
577
+ * } frame;
578
+ * unsigned int width;
579
+ * unsigned int height;
580
+ * unsigned int depth;
581
+ * unsigned int pitch;
582
+ * unsigned int planeCount;
583
+ * unsigned int numChannels;
584
+ * CUeglFrameType frameType;
585
+ * CUeglColorFormat eglColorFormat;
586
+ * CUarray_format cuFormat;
587
+ * } CUeglFrame;
588
+ * \endcode
589
+ *
590
+ * If \p resource is not registered then ::CUDA_ERROR_NOT_MAPPED is returned.
591
+ *
592
+ * \param eglFrame - Returned eglFrame.
593
+ * \param resource - Registered resource to access.
594
+ * \param index - Index for cubemap surfaces.
595
+ * \param mipLevel - Mipmap level for the subresource to access.
596
+ *
597
+ * \return
598
+ * ::CUDA_SUCCESS,
599
+ * ::CUDA_ERROR_DEINITIALIZED,
600
+ * ::CUDA_ERROR_NOT_INITIALIZED,
601
+ * ::CUDA_ERROR_INVALID_CONTEXT,
602
+ * ::CUDA_ERROR_INVALID_VALUE,
603
+ * ::CUDA_ERROR_INVALID_HANDLE,
604
+ * ::CUDA_ERROR_NOT_MAPPED
605
+ *
606
+ * \sa
607
+ * ::cuGraphicsMapResources,
608
+ * ::cuGraphicsSubResourceGetMappedArray,
609
+ * ::cuGraphicsResourceGetMappedPointer,
610
+ * ::cudaGraphicsResourceGetMappedEglFrame
611
+ */
612
+ CUresult CUDAAPI cuGraphicsResourceGetMappedEglFrame(CUeglFrame* eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel);
613
+
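+ /*
+ * Illustrative sketch (not part of the original header): walks the planes of a
+ * mapped frame and picks the array or pitch view depending on frameType. `res` is
+ * assumed to be a registered (and, for EGLStream frames, acquired) resource.
+ *
+ * \code
+ * CUeglFrame frame;
+ * if (cuGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0) == CUDA_SUCCESS) {
+ *     for (unsigned int p = 0; p < frame.planeCount; ++p) {
+ *         if (frame.frameType == CU_EGL_FRAME_TYPE_ARRAY) {
+ *             CUarray plane = frame.frame.pArray[p];   // use with array copies or surfaces
+ *             (void)plane;
+ *         } else {
+ *             void *plane = frame.frame.pPitch[p];     // pitched device memory
+ *             (void)plane;
+ *         }
+ *     }
+ * }
+ * \endcode
+ */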
614
+ /**
615
+ * \brief Creates an event from EGLSync object
616
+ *
617
+ * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
618
+ * via \p flags. Valid flags include:
619
+ * - ::CU_EVENT_DEFAULT: Default event creation flag.
620
+ * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking
621
+ * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on
622
+ * an event created with this flag will block until the event has actually
623
+ * been completed.
624
+ *
625
+ * Once the \p eglSync gets destroyed, ::cuEventDestroy is the only API
626
+ * that can be invoked on the event.
627
+ *
628
+ * ::cuEventRecord and TimingData are not supported for events created from EGLSync.
629
+ *
630
+ * The EGLSyncKHR is an opaque handle to an EGL sync object.
631
+ * typedef void* EGLSyncKHR
632
+ *
633
+ * \param phEvent - Returns newly created event
634
+ * \param eglSync - Opaque handle to EGLSync object
635
+ * \param flags - Event creation flags
636
+ *
637
+ * \return
638
+ * ::CUDA_SUCCESS,
639
+ * ::CUDA_ERROR_DEINITIALIZED,
640
+ * ::CUDA_ERROR_NOT_INITIALIZED,
641
+ * ::CUDA_ERROR_INVALID_CONTEXT,
642
+ * ::CUDA_ERROR_INVALID_VALUE,
643
+ * ::CUDA_ERROR_OUT_OF_MEMORY
644
+ *
645
+ * \sa
646
+ * ::cuEventQuery,
647
+ * ::cuEventSynchronize,
648
+ * ::cuEventDestroy
649
+ */
650
+ CUresult CUDAAPI cuEventCreateFromEGLSync(CUevent *phEvent, EGLSyncKHR eglSync, unsigned int flags);
651
+
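+ /*
+ * Illustrative sketch (not part of the original header): wraps an application's
+ * EGLSyncKHR in a CUDA event and blocks the host on it. `eglSync` is assumed to come
+ * from the application's EGL code; only the flags documented above are used.
+ *
+ * \code
+ * CUevent ev = NULL;
+ * if (cuEventCreateFromEGLSync(&ev, eglSync, CU_EVENT_BLOCKING_SYNC) == CUDA_SUCCESS) {
+ *     cuEventSynchronize(ev);   // returns once the EGL sync object is signaled
+ *     cuEventDestroy(ev);
+ * }
+ * \endcode
+ */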
652
+ /** @} */ /* END CUDA_EGL */
653
+
654
+ #ifdef __cplusplus
655
+ };
656
+ #endif
657
+
658
+ #endif
659
+
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h ADDED
@@ -0,0 +1,96 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAEGLTYPEDEFS_H
51
+ #define CUDAEGLTYPEDEFS_H
52
+
53
+ #include <cudaEGL.h>
54
+
55
+ #ifdef __cplusplus
56
+ extern "C" {
57
+ #endif // __cplusplus
58
+
59
+ /*
60
+ * Macros for the latest version for each driver function in cudaEGL.h
61
+ */
62
+ #define PFN_cuGraphicsEGLRegisterImage PFN_cuGraphicsEGLRegisterImage_v7000
63
+ #define PFN_cuEGLStreamConsumerConnect PFN_cuEGLStreamConsumerConnect_v7000
64
+ #define PFN_cuEGLStreamConsumerConnectWithFlags PFN_cuEGLStreamConsumerConnectWithFlags_v8000
65
+ #define PFN_cuEGLStreamConsumerDisconnect PFN_cuEGLStreamConsumerDisconnect_v7000
66
+ #define PFN_cuEGLStreamConsumerAcquireFrame PFN_cuEGLStreamConsumerAcquireFrame_v7000
67
+ #define PFN_cuEGLStreamConsumerReleaseFrame PFN_cuEGLStreamConsumerReleaseFrame_v7000
68
+ #define PFN_cuEGLStreamProducerConnect PFN_cuEGLStreamProducerConnect_v7000
69
+ #define PFN_cuEGLStreamProducerDisconnect PFN_cuEGLStreamProducerDisconnect_v7000
70
+ #define PFN_cuEGLStreamProducerPresentFrame PFN_cuEGLStreamProducerPresentFrame_v7000
71
+ #define PFN_cuEGLStreamProducerReturnFrame PFN_cuEGLStreamProducerReturnFrame_v7000
72
+ #define PFN_cuGraphicsResourceGetMappedEglFrame PFN_cuGraphicsResourceGetMappedEglFrame_v7000
73
+ #define PFN_cuEventCreateFromEGLSync PFN_cuEventCreateFromEGLSync_v9000
74
+
75
+
76
+ /**
77
+ * Type definitions for functions defined in cudaEGL.h
78
+ */
79
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsEGLRegisterImage_v7000)(CUgraphicsResource CUDAAPI *pCudaResource, EGLImageKHR image, unsigned int flags);
80
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream);
81
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnectWithFlags_v8000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, unsigned int flags);
82
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
83
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerAcquireFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource CUDAAPI *pCudaResource, CUstream CUDAAPI *pStream, unsigned int timeout);
84
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerReleaseFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource pCudaResource, CUstream CUDAAPI *pStream);
85
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, EGLint width, EGLint height);
86
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
87
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerPresentFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 eglframe, CUstream CUDAAPI *pStream);
88
+ typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerReturnFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 CUDAAPI *eglframe, CUstream CUDAAPI *pStream);
89
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedEglFrame_v7000)(CUeglFrame_v1 CUDAAPI *eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel);
90
+ typedef CUresult (CUDAAPI *PFN_cuEventCreateFromEGLSync_v9000)(CUevent CUDAAPI *phEvent, EGLSyncKHR eglSync, unsigned int flags);
91
+
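+ /*
+ * Illustrative sketch (not part of the original header): these typedefs are meant for
+ * callers that resolve driver entry points at run time. The dlopen of "libcuda.so.1"
+ * is a Linux-specific assumption; cuGetProcAddress from cuda.h is the portable
+ * alternative.
+ *
+ * \code
+ * #include <dlfcn.h>
+ *
+ * void *lib = dlopen("libcuda.so.1", RTLD_NOW);
+ * PFN_cuEGLStreamConsumerConnect pfnConnect = lib
+ *     ? (PFN_cuEGLStreamConsumerConnect)dlsym(lib, "cuEGLStreamConsumerConnect")
+ *     : NULL;
+ * if (pfnConnect) {
+ *     // call through pfnConnect exactly like the static declaration in cudaEGL.h
+ * }
+ * \endcode
+ */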
92
+ #ifdef __cplusplus
93
+ }
94
+ #endif // __cplusplus
95
+
96
+ #endif // file guard
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGL.h ADDED
@@ -0,0 +1,608 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAGL_H
51
+ #define CUDAGL_H
52
+
53
+ #include <cuda.h>
54
+ #include <GL/gl.h>
55
+
56
+ #if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
57
+ #define __CUDA_DEPRECATED
58
+ #elif defined(_MSC_VER)
59
+ #define __CUDA_DEPRECATED __declspec(deprecated)
60
+ #elif defined(__GNUC__)
61
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
62
+ #else
63
+ #define __CUDA_DEPRECATED
64
+ #endif
65
+
66
+ #ifdef CUDA_FORCE_API_VERSION
67
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
68
+ #endif
69
+
70
+ #if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
71
+ #define __CUDA_API_PER_THREAD_DEFAULT_STREAM
72
+ #define __CUDA_API_PTDS(api) api ## _ptds
73
+ #define __CUDA_API_PTSZ(api) api ## _ptsz
74
+ #else
75
+ #define __CUDA_API_PTDS(api) api
76
+ #define __CUDA_API_PTSZ(api) api
77
+ #endif
78
+
79
+ #define cuGLCtxCreate cuGLCtxCreate_v2
80
+ #define cuGLMapBufferObject __CUDA_API_PTDS(cuGLMapBufferObject_v2)
81
+ #define cuGLMapBufferObjectAsync __CUDA_API_PTSZ(cuGLMapBufferObjectAsync_v2)
82
+ #define cuGLGetDevices cuGLGetDevices_v2
83
+
84
+ #ifdef __cplusplus
85
+ extern "C" {
86
+ #endif
87
+
88
+ /**
89
+ * \file cudaGL.h
90
+ * \brief Header file for the OpenGL interoperability functions of the
91
+ * low-level CUDA driver application programming interface.
92
+ */
93
+
94
+ /**
95
+ * \defgroup CUDA_GL OpenGL Interoperability
96
+ * \ingroup CUDA_DRIVER
97
+ *
98
+ * ___MANBRIEF___ OpenGL interoperability functions of the low-level CUDA
99
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
100
+ *
101
+ * This section describes the OpenGL interoperability functions of the
102
+ * low-level CUDA driver application programming interface. Note that mapping
103
+ * of OpenGL resources is performed with the graphics-API-agnostic resource
104
+ * mapping interface described in \ref CUDA_GRAPHICS "Graphics Interoperability".
105
+ *
106
+ * @{
107
+ */
108
+
109
+ #if defined(_WIN32)
110
+ #if !defined(WGL_NV_gpu_affinity)
111
+ typedef void* HGPUNV;
112
+ #endif
113
+ #endif /* _WIN32 */
114
+
115
+ /**
116
+ * \brief Registers an OpenGL buffer object
117
+ *
118
+ * Registers the buffer object specified by \p buffer for access by
119
+ * CUDA. A handle to the registered object is returned as \p
120
+ * pCudaResource. The register flags \p Flags specify the intended usage,
121
+ * as follows:
122
+ *
123
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this
124
+ * resource will be used. It is therefore assumed that this resource will be
125
+ * read from and written to by CUDA. This is the default value.
126
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA
127
+ * will not write to this resource.
128
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that
129
+ * CUDA will not read from this resource and will write over the
130
+ * entire contents of the resource, so none of the data previously
131
+ * stored in the resource will be preserved.
132
+ *
133
+ * \param pCudaResource - Pointer to the returned object handle
134
+ * \param buffer - name of buffer object to be registered
135
+ * \param Flags - Register flags
136
+ *
137
+ * \return
138
+ * ::CUDA_SUCCESS,
139
+ * ::CUDA_ERROR_INVALID_HANDLE,
140
+ * ::CUDA_ERROR_ALREADY_MAPPED,
141
+ * ::CUDA_ERROR_INVALID_CONTEXT,
142
+ * ::CUDA_ERROR_OPERATING_SYSTEM
143
+ * \notefnerr
144
+ *
145
+ * \sa
146
+ * ::cuGraphicsUnregisterResource,
147
+ * ::cuGraphicsMapResources,
148
+ * ::cuGraphicsResourceGetMappedPointer,
149
+ * ::cudaGraphicsGLRegisterBuffer
150
+ */
151
+ CUresult CUDAAPI cuGraphicsGLRegisterBuffer(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags);
152
+
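+ /*
+ * Illustrative sketch (not part of the original header): registers a GL buffer, maps
+ * it, and obtains a device pointer usable by kernels. `vbo` is assumed to be a buffer
+ * created by the application's GL code on the current context.
+ *
+ * \code
+ * static void mapGlBuffer(GLuint vbo)
+ * {
+ *     CUgraphicsResource res = NULL;
+ *     if (cuGraphicsGLRegisterBuffer(&res, vbo, CU_GRAPHICS_REGISTER_FLAGS_NONE) != CUDA_SUCCESS)
+ *         return;
+ *     if (cuGraphicsMapResources(1, &res, 0) == CUDA_SUCCESS) {
+ *         CUdeviceptr dptr;
+ *         size_t size;
+ *         cuGraphicsResourceGetMappedPointer(&dptr, &size, res);
+ *         // launch kernels that read or write [dptr, dptr + size) here
+ *         cuGraphicsUnmapResources(1, &res, 0);
+ *     }
+ *     cuGraphicsUnregisterResource(res);
+ * }
+ * \endcode
+ */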
153
+ /**
154
+ * \brief Register an OpenGL texture or renderbuffer object
155
+ *
156
+ * Registers the texture or renderbuffer object specified by \p image for access by CUDA.
157
+ * A handle to the registered object is returned as \p pCudaResource.
158
+ *
159
+ * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
160
+ * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
161
+ * or ::GL_RENDERBUFFER.
162
+ *
163
+ * The register flags \p Flags specify the intended usage, as follows:
164
+ *
165
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this
166
+ * resource will be used. It is therefore assumed that this resource will be
167
+ * read from and written to by CUDA. This is the default value.
168
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA
169
+ * will not write to this resource.
170
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that
171
+ * CUDA will not read from this resource and will write over the
172
+ * entire contents of the resource, so none of the data previously
173
+ * stored in the resource will be preserved.
174
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will
175
+ * bind this resource to a surface reference.
176
+ * - ::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform
177
+ * texture gather operations on this resource.
178
+ *
179
+ * The following image formats are supported. For brevity's sake, the list is abbreviated.
180
+ * For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
181
+ * {GL_R8, GL_R16, GL_RG8, GL_RG16} :
182
+ * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
183
+ * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
184
+ * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
185
+ * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
186
+ *
187
+ * The following image classes are currently disallowed:
188
+ * - Textures with borders
189
+ * - Multisampled renderbuffers
190
+ *
191
+ * \param pCudaResource - Pointer to the returned object handle
192
+ * \param image - name of texture or renderbuffer object to be registered
193
+ * \param target - Identifies the type of object specified by \p image
194
+ * \param Flags - Register flags
195
+ *
196
+ * \return
197
+ * ::CUDA_SUCCESS,
198
+ * ::CUDA_ERROR_INVALID_HANDLE,
199
+ * ::CUDA_ERROR_ALREADY_MAPPED,
200
+ * ::CUDA_ERROR_INVALID_CONTEXT,
201
+ * ::CUDA_ERROR_OPERATING_SYSTEM
202
+ * \notefnerr
203
+ *
204
+ * \sa
205
+ * ::cuGraphicsUnregisterResource,
206
+ * ::cuGraphicsMapResources,
207
+ * ::cuGraphicsSubResourceGetMappedArray,
208
+ * ::cudaGraphicsGLRegisterImage
209
+ */
210
+ CUresult CUDAAPI cuGraphicsGLRegisterImage(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags);
211
+
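+ /*
+ * Illustrative sketch (not part of the original header): registers a 2D GL texture
+ * and retrieves the CUarray backing mip level 0. `tex` is assumed to be an allocated
+ * texture of one of the supported formats listed above.
+ *
+ * \code
+ * static void mapGlTexture(GLuint tex)
+ * {
+ *     CUgraphicsResource res = NULL;
+ *     if (cuGraphicsGLRegisterImage(&res, tex, GL_TEXTURE_2D,
+ *                                   CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST) != CUDA_SUCCESS)
+ *         return;
+ *     if (cuGraphicsMapResources(1, &res, 0) == CUDA_SUCCESS) {
+ *         CUarray level0;
+ *         cuGraphicsSubResourceGetMappedArray(&level0, res, 0, 0);
+ *         // bind level0 to a surface or texture object for kernel access
+ *         cuGraphicsUnmapResources(1, &res, 0);
+ *     }
+ *     cuGraphicsUnregisterResource(res);
+ * }
+ * \endcode
+ */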
212
+ #ifdef _WIN32
213
+ /**
214
+ * \brief Gets the CUDA device associated with hGpu
215
+ *
216
+ * Returns in \p *pDevice the CUDA device associated with a \p hGpu, if
217
+ * applicable.
218
+ *
219
+ * \param pDevice - Device associated with hGpu
220
+ * \param hGpu - Handle to a GPU, as queried via ::WGL_NV_gpu_affinity()
221
+ *
222
+ * \return
223
+ * ::CUDA_SUCCESS,
224
+ * ::CUDA_ERROR_DEINITIALIZED,
225
+ * ::CUDA_ERROR_NOT_INITIALIZED,
226
+ * ::CUDA_ERROR_INVALID_CONTEXT,
227
+ * ::CUDA_ERROR_INVALID_VALUE
228
+ * \notefnerr
229
+ *
230
+ * \sa ::cuGLMapBufferObject,
231
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
232
+ * ::cuGLUnregisterBufferObject, ::cuGLUnmapBufferObjectAsync,
233
+ * ::cuGLSetBufferObjectMapFlags,
234
+ * ::cudaWGLGetDevice
235
+ */
236
+ CUresult CUDAAPI cuWGLGetDevice(CUdevice *pDevice, HGPUNV hGpu);
237
+ #endif /* _WIN32 */
238
+
239
+ /**
240
+ * CUDA devices corresponding to an OpenGL device
241
+ */
242
+ typedef enum CUGLDeviceList_enum {
243
+ CU_GL_DEVICE_LIST_ALL = 0x01, /**< The CUDA devices for all GPUs used by the current OpenGL context */
244
+ CU_GL_DEVICE_LIST_CURRENT_FRAME = 0x02, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
245
+ CU_GL_DEVICE_LIST_NEXT_FRAME = 0x03, /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
246
+ } CUGLDeviceList;
247
+
248
+ /**
249
+ * \brief Gets the CUDA devices associated with the current OpenGL context
250
+ *
251
+ * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
252
+ * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
253
+ * at most cudaDeviceCount of the CUDA-compatible devices corresponding to
254
+ * the current OpenGL context. If any of the GPUs being used by the current OpenGL
255
+ * context are not CUDA capable then the call will return CUDA_ERROR_NO_DEVICE.
256
+ *
257
+ * The \p deviceList argument may be any of the following:
258
+ * - ::CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context.
259
+ * - ::CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to
260
+ * render the current frame (in SLI).
261
+ * - ::CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to
262
+ * render the next frame (in SLI). Note that this is a prediction; it can't be guaranteed that
263
+ * this is correct in all cases.
264
+ *
265
+ * \param pCudaDeviceCount - Returned number of CUDA devices.
266
+ * \param pCudaDevices - Returned CUDA devices.
267
+ * \param cudaDeviceCount - The size of the output device array pCudaDevices.
268
+ * \param deviceList - The set of devices to return.
269
+ *
270
+ * \return
271
+ * ::CUDA_SUCCESS,
272
+ * ::CUDA_ERROR_NO_DEVICE,
273
+ * ::CUDA_ERROR_INVALID_VALUE,
274
+ * ::CUDA_ERROR_INVALID_CONTEXT,
275
+ * ::CUDA_ERROR_INVALID_GRAPHICS_CONTEXT,
276
+ * ::CUDA_ERROR_OPERATING_SYSTEM
277
+ *
278
+ * \notefnerr
279
+ *
280
+ * \sa
281
+ * ::cuWGLGetDevice,
282
+ * ::cudaGLGetDevices
283
+ */
284
+ CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
285
+
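+ /*
+ * Illustrative sketch (not part of the original header): queries which CUDA devices
+ * back the OpenGL context that is current on the calling thread. The fixed array size
+ * of 8 is an arbitrary application choice.
+ *
+ * \code
+ * CUdevice devices[8];
+ * unsigned int count = 0;
+ * if (cuGLGetDevices(&count, devices, 8, CU_GL_DEVICE_LIST_ALL) == CUDA_SUCCESS) {
+ *     // devices[0..count-1] are the CUDA devices used by the current GL context
+ * }
+ * \endcode
+ */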
286
+ /**
287
+ * \defgroup CUDA_GL_DEPRECATED OpenGL Interoperability [DEPRECATED]
288
+ *
289
+ * ___MANBRIEF___ deprecated OpenGL interoperability functions of the low-level
290
+ * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
291
+ *
292
+ * This section describes deprecated OpenGL interoperability functionality.
293
+ *
294
+ * @{
295
+ */
296
+
297
+ /** Flags to map or unmap a resource */
298
+ typedef enum CUGLmap_flags_enum {
299
+ CU_GL_MAP_RESOURCE_FLAGS_NONE = 0x00,
300
+ CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01,
301
+ CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02,
302
+ } CUGLmap_flags;
303
+
304
+ /**
305
+ * \brief Create a CUDA context for interoperability with OpenGL
306
+ *
307
+ * \deprecated This function is deprecated as of Cuda 5.0.
308
+ *
309
+ * This function is deprecated and should no longer be used. It is
310
+ * no longer necessary to associate a CUDA context with an OpenGL
311
+ * context in order to achieve maximum interoperability performance.
312
+ *
313
+ * \param pCtx - Returned CUDA context
314
+ * \param Flags - Options for CUDA context creation
315
+ * \param device - Device on which to create the context
316
+ *
317
+ * \return
318
+ * ::CUDA_SUCCESS,
319
+ * ::CUDA_ERROR_DEINITIALIZED,
320
+ * ::CUDA_ERROR_NOT_INITIALIZED,
321
+ * ::CUDA_ERROR_INVALID_CONTEXT,
322
+ * ::CUDA_ERROR_INVALID_VALUE,
323
+ * ::CUDA_ERROR_OUT_OF_MEMORY
324
+ * \notefnerr
325
+ *
326
+ * \sa ::cuCtxCreate, ::cuGLInit, ::cuGLMapBufferObject,
327
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
328
+ * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
329
+ * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
330
+ * ::cuWGLGetDevice
331
+ */
332
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device );
333
+
334
+ /**
335
+ * \brief Initializes OpenGL interoperability
336
+ *
337
+ * \deprecated This function is deprecated as of Cuda 3.0.
338
+ *
339
+ * Initializes OpenGL interoperability. This function is deprecated
340
+ * and calling it is no longer required. It may fail if the needed
341
+ * OpenGL driver facilities are not available.
342
+ *
343
+ * \return
344
+ * ::CUDA_SUCCESS,
345
+ * ::CUDA_ERROR_DEINITIALIZED,
346
+ * ::CUDA_ERROR_NOT_INITIALIZED,
347
+ * ::CUDA_ERROR_INVALID_CONTEXT,
348
+ * ::CUDA_ERROR_UNKNOWN
349
+ * \notefnerr
350
+ *
351
+ * \sa ::cuGLMapBufferObject,
352
+ * ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject,
353
+ * ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync,
354
+ * ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags,
355
+ * ::cuWGLGetDevice
356
+ */
357
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLInit(void);
358
+
359
+ /**
360
+ * \brief Registers an OpenGL buffer object
361
+ *
362
+ * \deprecated This function is deprecated as of Cuda 3.0.
363
+ *
364
+ * Registers the buffer object specified by \p buffer for access by
365
+ * CUDA. This function must be called before CUDA can map the buffer
366
+ * object. There must be a valid OpenGL context bound to the current
367
+ * thread when this function is called, and the buffer name is
368
+ * resolved by that context.
369
+ *
370
+ * \param buffer - The name of the buffer object to register.
371
+ *
372
+ * \return
373
+ * ::CUDA_SUCCESS,
374
+ * ::CUDA_ERROR_DEINITIALIZED,
375
+ * ::CUDA_ERROR_NOT_INITIALIZED,
376
+ * ::CUDA_ERROR_INVALID_CONTEXT,
377
+ * ::CUDA_ERROR_ALREADY_MAPPED
378
+ * \notefnerr
379
+ *
380
+ * \sa ::cuGraphicsGLRegisterBuffer
381
+ */
382
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLRegisterBufferObject(GLuint buffer);
383
+
384
+ /**
385
+ * \brief Maps an OpenGL buffer object
386
+ *
387
+ * \deprecated This function is deprecated as of Cuda 3.0.
388
+ *
389
+ * Maps the buffer object specified by \p buffer into the address space of the
390
+ * current CUDA context and returns in \p *dptr and \p *size the base pointer
391
+ * and size of the resulting mapping.
392
+ *
393
+ * There must be a valid OpenGL context bound to the current thread
394
+ * when this function is called. This must be the same context, or a
395
+ * member of the same shareGroup, as the context that was bound when
396
+ * the buffer was registered.
397
+ *
398
+ * All streams in the current CUDA context are synchronized with the
399
+ * current GL context.
400
+ *
401
+ * \param dptr - Returned mapped base pointer
402
+ * \param size - Returned size of mapping
403
+ * \param buffer - The name of the buffer object to map
404
+ *
405
+ * \return
406
+ * ::CUDA_SUCCESS,
407
+ * ::CUDA_ERROR_DEINITIALIZED,
408
+ * ::CUDA_ERROR_NOT_INITIALIZED,
409
+ * ::CUDA_ERROR_INVALID_CONTEXT,
410
+ * ::CUDA_ERROR_INVALID_VALUE,
411
+ * ::CUDA_ERROR_MAP_FAILED
412
+ * \notefnerr
413
+ *
414
+ * \sa ::cuGraphicsMapResources
415
+ */
416
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr *dptr, size_t *size, GLuint buffer);
417
+
418
+ /**
419
+ * \brief Unmaps an OpenGL buffer object
420
+ *
421
+ * \deprecated This function is deprecated as of Cuda 3.0.
422
+ *
423
+ * Unmaps the buffer object specified by \p buffer for access by CUDA.
424
+ *
425
+ * There must be a valid OpenGL context bound to the current thread
426
+ * when this function is called. This must be the same context, or a
427
+ * member of the same shareGroup, as the context that was bound when
428
+ * the buffer was registered.
429
+ *
430
+ * All streams in the current CUDA context are synchronized with the
431
+ * current GL context.
432
+ *
433
+ * \param buffer - Buffer object to unmap
434
+ *
435
+ * \return
436
+ * ::CUDA_SUCCESS,
437
+ * ::CUDA_ERROR_DEINITIALIZED,
438
+ * ::CUDA_ERROR_NOT_INITIALIZED,
439
+ * ::CUDA_ERROR_INVALID_CONTEXT,
440
+ * ::CUDA_ERROR_INVALID_VALUE
441
+ * \notefnerr
442
+ *
443
+ * \sa ::cuGraphicsUnmapResources
444
+ */
445
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObject(GLuint buffer);
446
+
447
+ /**
448
+ * \brief Unregister an OpenGL buffer object
449
+ *
450
+ * \deprecated This function is deprecated as of Cuda 3.0.
451
+ *
452
+ * Unregisters the buffer object specified by \p buffer. This
453
+ * releases any resources associated with the registered buffer.
454
+ * After this call, the buffer may no longer be mapped for access by
455
+ * CUDA.
456
+ *
457
+ * There must be a valid OpenGL context bound to the current thread
458
+ * when this function is called. This must be the same context, or a
459
+ * member of the same shareGroup, as the context that was bound when
460
+ * the buffer was registered.
461
+ *
462
+ * \param buffer - Name of the buffer object to unregister
463
+ *
464
+ * \return
465
+ * ::CUDA_SUCCESS,
466
+ * ::CUDA_ERROR_DEINITIALIZED,
467
+ * ::CUDA_ERROR_NOT_INITIALIZED,
468
+ * ::CUDA_ERROR_INVALID_CONTEXT,
469
+ * ::CUDA_ERROR_INVALID_VALUE
470
+ * \notefnerr
471
+ *
472
+ * \sa ::cuGraphicsUnregisterResource
473
+ */
474
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnregisterBufferObject(GLuint buffer);
475
+
476
+ /**
477
+ * \brief Set the map flags for an OpenGL buffer object
478
+ *
479
+ * \deprecated This function is deprecated as of Cuda 3.0.
480
+ *
481
+ * Sets the map flags for the buffer object specified by \p buffer.
482
+ *
483
+ * Changes to \p Flags will take effect the next time \p buffer is mapped.
484
+ * The \p Flags argument may be any of the following:
485
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
486
+ * resource will be used. It is therefore assumed that this resource will be
487
+ * read from and written to by CUDA kernels. This is the default value.
488
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA kernels which
489
+ * access this resource will not write to this resource.
490
+ * - ::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA kernels
491
+ * which access this resource will not read from this resource and will
492
+ * write over the entire contents of the resource, so none of the data
493
+ * previously stored in the resource will be preserved.
494
+ *
495
+ * If \p buffer has not been registered for use with CUDA, then
496
+ * ::CUDA_ERROR_INVALID_HANDLE is returned. If \p buffer is presently
497
+ * mapped for access by CUDA, then ::CUDA_ERROR_ALREADY_MAPPED is returned.
498
+ *
499
+ * There must be a valid OpenGL context bound to the current thread
500
+ * when this function is called. This must be the same context, or a
501
+ * member of the same shareGroup, as the context that was bound when
502
+ * the buffer was registered.
503
+ *
504
+ * \param buffer - Buffer object to unmap
505
+ * \param Flags - Map flags
506
+ *
507
+ * \return
508
+ * ::CUDA_SUCCESS,
509
+ * ::CUDA_ERROR_NOT_INITIALIZED,
510
+ * ::CUDA_ERROR_INVALID_HANDLE,
511
+ * ::CUDA_ERROR_ALREADY_MAPPED,
512
+ * ::CUDA_ERROR_INVALID_CONTEXT,
513
+ * \notefnerr
514
+ *
515
+ * \sa ::cuGraphicsResourceSetMapFlags
516
+ */
517
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLSetBufferObjectMapFlags(GLuint buffer, unsigned int Flags);
518
+
519
+ /**
520
+ * \brief Maps an OpenGL buffer object
521
+ *
522
+ * \deprecated This function is deprecated as of Cuda 3.0.
523
+ *
524
+ * Maps the buffer object specified by \p buffer into the address space of the
525
+ * current CUDA context and returns in \p *dptr and \p *size the base pointer
526
+ * and size of the resulting mapping.
527
+ *
528
+ * There must be a valid OpenGL context bound to the current thread
529
+ * when this function is called. This must be the same context, or a
530
+ * member of the same shareGroup, as the context that was bound when
531
+ * the buffer was registered.
532
+ *
533
+ * Stream \p hStream in the current CUDA context is synchronized with
534
+ * the current GL context.
535
+ *
536
+ * \param dptr - Returned mapped base pointer
537
+ * \param size - Returned size of mapping
538
+ * \param buffer - The name of the buffer object to map
539
+ * \param hStream - Stream to synchronize
540
+ *
541
+ * \return
542
+ * ::CUDA_SUCCESS,
543
+ * ::CUDA_ERROR_DEINITIALIZED,
544
+ * ::CUDA_ERROR_NOT_INITIALIZED,
545
+ * ::CUDA_ERROR_INVALID_CONTEXT,
546
+ * ::CUDA_ERROR_INVALID_VALUE,
547
+ * ::CUDA_ERROR_MAP_FAILED
548
+ * \notefnerr
549
+ *
550
+ * \sa ::cuGraphicsMapResources
551
+ */
552
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream);
553
+
554
+ /**
555
+ * \brief Unmaps an OpenGL buffer object
556
+ *
557
+ * \deprecated This function is deprecated as of Cuda 3.0.
558
+ *
559
+ * Unmaps the buffer object specified by \p buffer for access by CUDA.
560
+ *
561
+ * There must be a valid OpenGL context bound to the current thread
562
+ * when this function is called. This must be the same context, or a
563
+ * member of the same shareGroup, as the context that was bound when
564
+ * the buffer was registered.
565
+ *
566
+ * Stream \p hStream in the current CUDA context is synchronized with
567
+ * the current GL context.
568
+ *
569
+ * \param buffer - Name of the buffer object to unmap
570
+ * \param hStream - Stream to synchronize
571
+ *
572
+ * \return
573
+ * ::CUDA_SUCCESS,
574
+ * ::CUDA_ERROR_DEINITIALIZED,
575
+ * ::CUDA_ERROR_NOT_INITIALIZED,
576
+ * ::CUDA_ERROR_INVALID_CONTEXT,
577
+ * ::CUDA_ERROR_INVALID_VALUE
578
+ * \notefnerr
579
+ *
580
+ * \sa ::cuGraphicsUnmapResources
581
+ */
582
+ __CUDA_DEPRECATED CUresult CUDAAPI cuGLUnmapBufferObjectAsync(GLuint buffer, CUstream hStream);
583
+
584
+ /** @} */ /* END CUDA_GL_DEPRECATED */
585
+ /** @} */ /* END CUDA_GL */
586
+
587
+
588
+ #if defined(__CUDA_API_VERSION_INTERNAL)
589
+ #undef cuGLCtxCreate
590
+ #undef cuGLMapBufferObject
591
+ #undef cuGLMapBufferObjectAsync
592
+ #undef cuGLGetDevices
593
+
594
+ CUresult CUDAAPI cuGLGetDevices(unsigned int *pCudaDeviceCount, CUdevice *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
595
+ CUresult CUDAAPI cuGLMapBufferObject_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer);
596
+ CUresult CUDAAPI cuGLMapBufferObjectAsync_v2(CUdeviceptr *dptr, size_t *size, GLuint buffer, CUstream hStream);
597
+ CUresult CUDAAPI cuGLCtxCreate(CUcontext *pCtx, unsigned int Flags, CUdevice device );
598
+ CUresult CUDAAPI cuGLMapBufferObject(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer);
599
+ CUresult CUDAAPI cuGLMapBufferObjectAsync(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream);
600
+ #endif /* __CUDA_API_VERSION_INTERNAL */
601
+
602
+ #ifdef __cplusplus
603
+ };
604
+ #endif
605
+
606
+ #undef __CUDA_DEPRECATED
607
+
608
+ #endif
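
The deprecated cuGL*BufferObject entry points above all defer to the graphics-interop API named in their \sa references. As a point of comparison, here is a minimal sketch of that replacement flow for a GL buffer object; it is not taken from this header, assumes a live CUDA context, a current OpenGL context and a valid GL buffer name `vbo`, and omits all error checking.

#include <GL/gl.h>
#include <cudaGL.h>   /* declares the cuGraphicsGL* entry points used below */

/* Replaces cuGLRegisterBufferObject / cuGLMapBufferObject(Async) /
 * cuGLUnmapBufferObject(Async) / cuGLUnregisterBufferObject. */
static void process_gl_buffer(GLuint vbo, CUstream hStream)
{
    CUgraphicsResource res;
    CUdeviceptr dptr;
    size_t size;

    /* One-time registration; cuGraphicsResourceSetMapFlags is the analog of
     * the deprecated cuGLSetBufferObjectMapFlags. */
    cuGraphicsGLRegisterBuffer(&res, vbo, CU_GRAPHICS_REGISTER_FLAGS_NONE);

    /* Per-frame map/unmap, ordered against hStream like the *Async variants above. */
    cuGraphicsMapResources(1, &res, hStream);
    cuGraphicsResourceGetMappedPointer(&dptr, &size, res);
    /* ... launch kernels on hStream that read or write dptr ... */
    cuGraphicsUnmapResources(1, &res, hStream);

    cuGraphicsUnregisterResource(res);
}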
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaGLTypedefs.h ADDED
@@ -0,0 +1,123 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAGLTYPEDEFS_H
51
+ #define CUDAGLTYPEDEFS_H
52
+
53
+ // Dependent includes for cudagl.h
54
+ #include <GL/gl.h>
55
+
56
+ #include <cudaGL.h>
57
+
58
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
59
+ #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds
60
+ #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz
61
+ #else
62
+ #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version
63
+ #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version
64
+ #endif
65
+
66
+ #ifdef __cplusplus
67
+ extern "C" {
68
+ #endif // __cplusplus
69
+
70
+ /*
71
+ * Macros for the latest version for each driver function in cudaGL.h
72
+ */
73
+ #define PFN_cuGraphicsGLRegisterBuffer PFN_cuGraphicsGLRegisterBuffer_v3000
74
+ #define PFN_cuGraphicsGLRegisterImage PFN_cuGraphicsGLRegisterImage_v3000
75
+ #define PFN_cuWGLGetDevice PFN_cuWGLGetDevice_v2020
76
+ #define PFN_cuGLGetDevices PFN_cuGLGetDevices_v6050
77
+ #define PFN_cuGLCtxCreate PFN_cuGLCtxCreate_v3020
78
+ #define PFN_cuGLInit PFN_cuGLInit_v2000
79
+ #define PFN_cuGLRegisterBufferObject PFN_cuGLRegisterBufferObject_v2000
80
+ #define PFN_cuGLMapBufferObject __API_TYPEDEF_PTDS(PFN_cuGLMapBufferObject, 3020, 7000)
81
+ #define PFN_cuGLUnmapBufferObject PFN_cuGLUnmapBufferObject_v2000
82
+ #define PFN_cuGLUnregisterBufferObject PFN_cuGLUnregisterBufferObject_v2000
83
+ #define PFN_cuGLSetBufferObjectMapFlags PFN_cuGLSetBufferObjectMapFlags_v2030
84
+ #define PFN_cuGLMapBufferObjectAsync __API_TYPEDEF_PTSZ(PFN_cuGLMapBufferObjectAsync, 3020, 7000)
85
+ #define PFN_cuGLUnmapBufferObjectAsync PFN_cuGLUnmapBufferObjectAsync_v2030
86
+
87
+
88
+ /**
89
+ * Type definitions for functions defined in cudaGL.h
90
+ */
91
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterBuffer_v3000)(CUgraphicsResource *pCudaResource, GLuint buffer, unsigned int Flags);
92
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsGLRegisterImage_v3000)(CUgraphicsResource *pCudaResource, GLuint image, GLenum target, unsigned int Flags);
93
+ #ifdef _WIN32
94
+ typedef CUresult (CUDAAPI *PFN_cuWGLGetDevice_v2020)(CUdevice_v1 *pDevice, HGPUNV hGpu);
95
+ #endif
96
+ typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v6050)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
97
+ typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v3020)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device);
98
+ typedef CUresult (CUDAAPI *PFN_cuGLInit_v2000)(void);
99
+ typedef CUresult (CUDAAPI *PFN_cuGLRegisterBufferObject_v2000)(GLuint buffer);
100
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v7000_ptds)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer);
101
+ typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObject_v2000)(GLuint buffer);
102
+ typedef CUresult (CUDAAPI *PFN_cuGLUnregisterBufferObject_v2000)(GLuint buffer);
103
+ typedef CUresult (CUDAAPI *PFN_cuGLSetBufferObjectMapFlags_v2030)(GLuint buffer, unsigned int Flags);
104
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v7000_ptsz)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream);
105
+ typedef CUresult (CUDAAPI *PFN_cuGLUnmapBufferObjectAsync_v2030)(GLuint buffer, CUstream hStream);
106
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer);
107
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v3020)(CUdeviceptr_v2 *dptr, size_t *size, GLuint buffer, CUstream hStream);
108
+
109
+ /*
110
+ * Type definitions for older versioned functions in cuda.h
111
+ */
112
+ #if defined(__CUDA_API_VERSION_INTERNAL)
113
+ typedef CUresult (CUDAAPI *PFN_cuGLGetDevices_v4010)(unsigned int *pCudaDeviceCount, CUdevice_v1 *pCudaDevices, unsigned int cudaDeviceCount, CUGLDeviceList deviceList);
114
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObject_v2000)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer);
115
+ typedef CUresult (CUDAAPI *PFN_cuGLMapBufferObjectAsync_v2030)(CUdeviceptr_v1 *dptr, unsigned int *size, GLuint buffer, CUstream hStream);
116
+ typedef CUresult (CUDAAPI *PFN_cuGLCtxCreate_v2000)(CUcontext *pCtx, unsigned int Flags, CUdevice_v1 device);
117
+ #endif
118
+
119
+ #ifdef __cplusplus
120
+ }
121
+ #endif // __cplusplus
122
+
123
+ #endif // file guard
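
The PFN_* typedefs above exist so an application can resolve the GL-interop entry points at run time instead of linking against them directly. A minimal sketch of that pattern follows; the library name `libcuda.so.1` and the exported symbol name `cuGLGetDevices_v2` are assumptions about the driver's exports rather than anything this header guarantees, and error handling is omitted. Note also that the __API_TYPEDEF_PTDS/__API_TYPEDEF_PTSZ macros select the *_ptds/*_ptsz typedefs only when CUDA_API_PER_THREAD_DEFAULT_STREAM is defined; otherwise the default-versioned typedef is used.

#include <dlfcn.h>
#include <cudaGLTypedefs.h>   /* PFN_cuGLGetDevices resolves to PFN_cuGLGetDevices_v6050 */

/* Resolve cuGLGetDevices through dlopen/dlsym and call it through its typedef. */
static CUresult query_gl_devices(unsigned int *count, CUdevice *devices, unsigned int capacity)
{
    void *libcuda = dlopen("libcuda.so.1", RTLD_NOW);              /* assumed library name */
    PFN_cuGLGetDevices pfnGLGetDevices =
        (PFN_cuGLGetDevices)dlsym(libcuda, "cuGLGetDevices_v2");   /* assumed symbol name */
    return pfnGLGetDevices(count, devices, capacity, CU_GL_DEVICE_LIST_ALL);
}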
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h ADDED
@@ -0,0 +1,78 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAPROFILERTYPEDEFS_H
51
+ #define CUDAPROFILERTYPEDEFS_H
52
+
53
+ #include <cudaProfiler.h>
54
+
55
+ #ifdef __cplusplus
56
+ extern "C" {
57
+ #endif // __cplusplus
58
+
59
+ /*
60
+ * Macros for the latest version for each driver function in cudaProfiler.h
61
+ */
62
+ #define PFN_cuProfilerInitialize PFN_cuProfilerInitialize_v4000
63
+ #define PFN_cuProfilerStart PFN_cuProfilerStart_v4000
64
+ #define PFN_cuProfilerStop PFN_cuProfilerStop_v4000
65
+
66
+
67
+ /**
68
+ * Type definitions for functions defined in cudaProfiler.h
69
+ */
70
+ typedef CUresult (CUDAAPI *PFN_cuProfilerInitialize_v4000)(const char *configFile, const char *outputFile, CUoutput_mode outputMode);
71
+ typedef CUresult (CUDAAPI *PFN_cuProfilerStart_v4000)(void);
72
+ typedef CUresult (CUDAAPI *PFN_cuProfilerStop_v4000)(void);
73
+
74
+ #ifdef __cplusplus
75
+ }
76
+ #endif // __cplusplus
77
+
78
+ #endif // file guard
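
These profiler typedefs follow the same run-time-loading pattern as the GL ones above; in practice the underlying functions can also be called directly once cudaProfiler.h is included. A minimal sketch, assuming an initialized CUDA context and a profiler that honors the start/stop controls:

#include <cuda.h>
#include <cudaProfiler.h>

/* Restrict driver-level profile collection to a region of interest. */
static void profile_region(void (*workload)(void))
{
    cuProfilerStart();   /* begin collecting profile data for the current context */
    workload();          /* region of interest */
    cuProfilerStop();    /* stop collecting profile data */
}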
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h ADDED
@@ -0,0 +1,282 @@
1
+ /*
2
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAVDPAU_H
51
+ #define CUDAVDPAU_H
52
+
53
+ #ifdef CUDA_FORCE_API_VERSION
54
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
55
+ #endif
56
+
57
+ #define cuVDPAUCtxCreate cuVDPAUCtxCreate_v2
58
+
59
+ #ifdef __cplusplus
60
+ extern "C" {
61
+ #endif
62
+
63
+ /**
64
+ * \defgroup CUDA_VDPAU VDPAU Interoperability
65
+ * \ingroup CUDA_DRIVER
66
+ *
67
+ * ___MANBRIEF___ VDPAU interoperability functions of the low-level CUDA driver
68
+ * API (___CURRENT_FILE___) ___ENDMANBRIEF___
69
+ *
70
+ * This section describes the VDPAU interoperability functions of the
71
+ * low-level CUDA driver application programming interface.
72
+ *
73
+ * @{
74
+ */
75
+
76
+ /**
77
+ * \brief Gets the CUDA device associated with a VDPAU device
78
+ *
79
+ * Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
80
+ * applicable.
81
+ *
82
+ * \param pDevice - Device associated with vdpDevice
83
+ * \param vdpDevice - A VdpDevice handle
84
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
85
+ *
86
+ * \return
87
+ * ::CUDA_SUCCESS,
88
+ * ::CUDA_ERROR_DEINITIALIZED,
89
+ * ::CUDA_ERROR_NOT_INITIALIZED,
90
+ * ::CUDA_ERROR_INVALID_CONTEXT,
91
+ * ::CUDA_ERROR_INVALID_VALUE
92
+ * \notefnerr
93
+ *
94
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
95
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
96
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
97
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
98
+ * ::cudaVDPAUGetDevice
99
+ */
100
+ CUresult CUDAAPI cuVDPAUGetDevice(CUdevice *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
101
+
102
+ /**
103
+ * \brief Create a CUDA context for interoperability with VDPAU
104
+ *
105
+ * Creates a new CUDA context, initializes VDPAU interoperability, and
106
+ * associates the CUDA context with the calling thread. It must be called
107
+ * before performing any other VDPAU interoperability operations. It may fail
108
+ * if the needed VDPAU driver facilities are not available. For usage of the
109
+ * \p flags parameter, see ::cuCtxCreate().
110
+ *
111
+ * \param pCtx - Returned CUDA context
112
+ * \param flags - Options for CUDA context creation
113
+ * \param device - Device on which to create the context
114
+ * \param vdpDevice - The VdpDevice to interop with
115
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
116
+ *
117
+ * \return
118
+ * ::CUDA_SUCCESS,
119
+ * ::CUDA_ERROR_DEINITIALIZED,
120
+ * ::CUDA_ERROR_NOT_INITIALIZED,
121
+ * ::CUDA_ERROR_INVALID_CONTEXT,
122
+ * ::CUDA_ERROR_INVALID_VALUE,
123
+ * ::CUDA_ERROR_OUT_OF_MEMORY
124
+ * \notefnerr
125
+ *
126
+ * \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
127
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
128
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
129
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
130
+ * ::cuVDPAUGetDevice
131
+ */
132
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
133
+
134
+ /**
135
+ * \brief Registers a VDPAU VdpVideoSurface object
136
+ *
137
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by
138
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
139
+ * The surface's intended usage is specified using \p flags, as follows:
140
+ *
141
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
142
+ * resource will be used. It is therefore assumed that this resource will be
143
+ * read from and written to by CUDA. This is the default value.
144
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
145
+ * will not write to this resource.
146
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
147
+ * CUDA will not read from this resource and will write over the
148
+ * entire contents of the resource, so none of the data previously
149
+ * stored in the resource will be preserved.
150
+ *
151
+ * The VdpVideoSurface is presented as an array of subresources that may be
152
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
153
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
154
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
155
+ *
156
+ * \htmlonly
157
+ * <table>
158
+ * <tr><th>VdpChromaType </th><th>arrayIndex</th><th>Size </th><th>Format</th><th>Content </th></tr>
159
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_420</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
160
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
161
+ * <tr> <td>2 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Top-field chroma </td></tr>
162
+ * <tr> <td>3 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
163
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_422</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
164
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
165
+ * <tr> <td>2 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Top-field chroma </td></tr>
166
+ * <tr> <td>3 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
167
+ * </table>
168
+ * \endhtmlonly
169
+ *
170
+ * \latexonly
171
+ * \begin{tabular}{|l|l|l|l|l|}
172
+ * \hline
173
+ * VdpChromaType & arrayIndex & Size & Format & Content \\
174
+ * \hline
175
+ * VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\
176
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
177
+ * & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\
178
+ * & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\
179
+ * \hline
180
+ * VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\
181
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
182
+ * & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\
183
+ * & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\
184
+ * \hline
185
+ * \end{tabular}
186
+ * \endlatexonly
187
+ *
188
+ * \param pCudaResource - Pointer to the returned object handle
189
+ * \param vdpSurface - The VdpVideoSurface to be registered
190
+ * \param flags - Map flags
191
+ *
192
+ * \return
193
+ * ::CUDA_SUCCESS,
194
+ * ::CUDA_ERROR_INVALID_HANDLE,
195
+ * ::CUDA_ERROR_ALREADY_MAPPED,
196
+ * ::CUDA_ERROR_INVALID_CONTEXT,
197
+ * \notefnerr
198
+ *
199
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
200
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
201
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
202
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
203
+ * ::cuVDPAUGetDevice,
204
+ * ::cudaGraphicsVDPAURegisterVideoSurface
205
+ */
206
+ CUresult CUDAAPI cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
207
+
208
+ /**
209
+ * \brief Registers a VDPAU VdpOutputSurface object
210
+ *
211
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by
212
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
213
+ * The surface's intended usage is specified using \p flags, as follows:
214
+ *
215
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
216
+ * resource will be used. It is therefore assumed that this resource will be
217
+ * read from and written to by CUDA. This is the default value.
218
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
219
+ * will not write to this resource.
220
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
221
+ * CUDA will not read from this resource and will write over the
222
+ * entire contents of the resource, so none of the data previously
223
+ * stored in the resource will be preserved.
224
+ *
225
+ * The VdpOutputSurface is presented as an array of subresources that may be
226
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
227
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
228
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
229
+ *
230
+ * \htmlonly
231
+ * <table>
232
+ * <tr><th>VdpRGBAFormat </th><th>arrayIndex</th><th>Size </th><th>Format </th><th>Content </th></tr>
233
+ * <tr><td>VDP_RGBA_FORMAT_B8G8R8A8 </td><td>0 </td><td>w x h</td><td>ARGB8 </td><td>Entire surface</td></tr>
234
+ * <tr><td>VDP_RGBA_FORMAT_R10G10B10A2</td><td>0 </td><td>w x h</td><td>A2BGR10</td><td>Entire surface</td></tr>
235
+ * </table>
236
+ * \endhtmlonly
237
+ *
238
+ * \latexonly
239
+ * \begin{tabular}{|l|l|l|l|l|}
240
+ * \hline
241
+ * VdpRGBAFormat & arrayIndex & Size & Format & Content \\
242
+ * \hline
243
+ * VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\
244
+ * VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\
245
+ * \hline
246
+ * \end{tabular}
247
+ * \endlatexonly
248
+ *
249
+ * \param pCudaResource - Pointer to the returned object handle
250
+ * \param vdpSurface - The VdpOutputSurface to be registered
251
+ * \param flags - Map flags
252
+ *
253
+ * \return
254
+ * ::CUDA_SUCCESS,
255
+ * ::CUDA_ERROR_INVALID_HANDLE,
256
+ * ::CUDA_ERROR_ALREADY_MAPPED,
257
+ * ::CUDA_ERROR_INVALID_CONTEXT,
258
+ * \notefnerr
259
+ *
260
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
261
+ * ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource,
262
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
263
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
264
+ * ::cuVDPAUGetDevice,
265
+ * ::cudaGraphicsVDPAURegisterOutputSurface
266
+ */
267
+ CUresult CUDAAPI cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
268
+
269
+ /** @} */ /* END CUDA_VDPAU */
270
+
271
+
272
+ #if defined(__CUDA_API_VERSION_INTERNAL)
273
+ #undef cuVDPAUCtxCreate
274
+
275
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
276
+ #endif /* __CUDA_API_VERSION_INTERNAL */
277
+
278
+ #ifdef __cplusplus
279
+ };
280
+ #endif
281
+
282
+ #endif
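
Taken together, the functions above form a short workflow: obtain the CUDA device for the VDPAU device, create an interop context, register a surface, then map it and fetch its sub-resource array. A minimal sketch for a VdpOutputSurface follows; it is illustrative only, assumes valid `vdpDevice`, `vdpGetProcAddress`, and `surface` handles obtained from VDPAU, and omits error checking.

#include <vdpau/vdpau.h>
#include <cudaVDPAU.h>

static void read_output_surface(VdpDevice vdpDevice,
                                VdpGetProcAddress *vdpGetProcAddress,
                                VdpOutputSurface surface)
{
    CUcontext ctx;
    CUdevice dev;
    CUgraphicsResource res;
    CUarray array;

    /* Associate a CUDA context with the VDPAU device (once per application). */
    cuVDPAUGetDevice(&dev, vdpDevice, vdpGetProcAddress);
    cuVDPAUCtxCreate(&ctx, 0, dev, vdpDevice, vdpGetProcAddress);

    /* Register once; CUDA will only read from this surface. */
    cuGraphicsVDPAURegisterOutputSurface(&res, surface,
                                         CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);

    /* Map, fetch the single subresource (arrayIndex 0, mipLevel 0), use it, unmap. */
    cuGraphicsMapResources(1, &res, 0);
    cuGraphicsSubResourceGetMappedArray(&array, res, 0, 0);
    /* ... cuMemcpy2D from `array`, or bind it to a texture/surface object ... */
    cuGraphicsUnmapResources(1, &res, 0);

    cuGraphicsUnregisterResource(res);
    cuCtxDestroy(ctx);
}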
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h ADDED
@@ -0,0 +1,90 @@
1
+ /*
2
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef CUDAVDPAUTYPEDEFS_H
51
+ #define CUDAVDPAUTYPEDEFS_H
52
+
53
+ // Dependent includes for cudavdpau.h
54
+ #include <vdpau/vdpau.h>
55
+
56
+ #include <cudaVDPAU.h>
57
+
58
+ #ifdef __cplusplus
59
+ extern "C" {
60
+ #endif // __cplusplus
61
+
62
+ /*
63
+ * Macros for the latest version for each driver function in cudaVDPAU.h
64
+ */
65
+ #define PFN_cuVDPAUGetDevice PFN_cuVDPAUGetDevice_v3010
66
+ #define PFN_cuVDPAUCtxCreate PFN_cuVDPAUCtxCreate_v3020
67
+ #define PFN_cuGraphicsVDPAURegisterVideoSurface PFN_cuGraphicsVDPAURegisterVideoSurface_v3010
68
+ #define PFN_cuGraphicsVDPAURegisterOutputSurface PFN_cuGraphicsVDPAURegisterOutputSurface_v3010
69
+
70
+
71
+ /**
72
+ * Type definitions for functions defined in cudaVDPAU.h
73
+ */
74
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUGetDevice_v3010)(CUdevice_v1 *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
75
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3020)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
76
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterVideoSurface_v3010)(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
77
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterOutputSurface_v3010)(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
78
+
79
+ /*
80
+ * Type definitions for older versioned functions in cudaVDPAU.h
81
+ */
82
+ #if defined(__CUDA_API_VERSION_INTERNAL)
83
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3010)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
84
+ #endif
85
+
86
+ #ifdef __cplusplus
87
+ }
88
+ #endif // __cplusplus
89
+
90
+ #endif // file guard
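
As with the GL typedefs, these are intended for run-time resolution of the VDPAU interop entry points. A minimal sketch under the same dlopen/dlsym assumptions as before (library and symbol names are assumptions, error handling omitted):

#include <dlfcn.h>
#include <cudaVDPAUTypedefs.h>

/* Resolve cuVDPAUGetDevice without linking libcuda at build time. */
static PFN_cuVDPAUGetDevice load_cuVDPAUGetDevice(void)
{
    void *libcuda = dlopen("libcuda.so.1", RTLD_NOW);                 /* assumed library name */
    return (PFN_cuVDPAUGetDevice)dlsym(libcuda, "cuVDPAUGetDevice");  /* assumed symbol name */
}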
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h ADDED
@@ -0,0 +1,280 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_H_
51
+ # define _CUDA_AWBARRIER_H_
52
+
53
+ # include "cuda_awbarrier_primitives.h"
54
+
55
+ # if !defined(_CUDA_AWBARRIER_SM_TARGET)
56
+ # error This file requires compute capability 7.0 or greater.
57
+ # endif
58
+
59
+ # if !defined(_CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER)
60
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
61
+ -std=c++11 compiler option.
62
+ # endif
63
+
64
+ _CUDA_AWBARRIER_BEGIN_NAMESPACE
65
+
66
+ class awbarrier {
67
+ public:
68
+ class arrival_token {
69
+ public:
70
+ arrival_token() = default;
71
+ ~arrival_token() = default;
72
+ _CUDA_AWBARRIER_QUALIFIER uint32_t pending_count() const;
73
+ private:
74
+ _CUDA_AWBARRIER_QUALIFIER arrival_token(uint64_t token);
75
+ uint64_t token;
76
+ friend awbarrier;
77
+ };
78
+ awbarrier() = default;
79
+ awbarrier(const awbarrier&) = delete;
80
+ awbarrier& operator=(const awbarrier&) = delete;
81
+ ~awbarrier() = default;
82
+
83
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive();
84
+ _CUDA_AWBARRIER_QUALIFIER arrival_token arrive_and_drop();
85
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait(arrival_token token, uint32_t hint_cycles);
86
+ _CUDA_AWBARRIER_QUALIFIER bool timed_wait_parity(bool phase, uint32_t hint_cycles);
87
+ _CUDA_AWBARRIER_QUALIFIER void wait(arrival_token token);
88
+ _CUDA_AWBARRIER_QUALIFIER void arrive_and_wait();
89
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait(arrival_token token, uint32_t maxSleepNanosec);
90
+ _CUDA_AWBARRIER_QUALIFIER bool try_wait_parity(bool phase, uint32_t maxSleepNanosec);
91
+ _CUDA_AWBARRIER_STATIC_QUALIFIER __host__ constexpr uint32_t max();
92
+
93
+ private:
94
+ uint64_t barrier;
95
+ friend _CUDA_AWBARRIER_QUALIFIER void init(awbarrier* barrier, uint32_t expected_count);
96
+ friend _CUDA_AWBARRIER_QUALIFIER void inval(awbarrier* barrier);
97
+ friend class pipeline;
98
+ };
99
+
100
+ _CUDA_AWBARRIER_QUALIFIER
101
+ uint32_t awbarrier::arrival_token::pending_count() const
102
+ {
103
+ const uint32_t pending_count = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(this->token);
104
+ #if (__CUDA_ARCH__ >= 900)
105
+ return pending_count;
106
+ #else
107
+ return (pending_count >> 15);
108
+ #endif
109
+ }
110
+
111
+ _CUDA_AWBARRIER_QUALIFIER
112
+ awbarrier::arrival_token::arrival_token(uint64_t token)
113
+ : token(token)
114
+ {
115
+ }
116
+
117
+ _CUDA_AWBARRIER_QUALIFIER
118
+ void init(awbarrier* barrier, uint32_t expected_count)
119
+ {
120
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
121
+ _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count <= _CUDA_AWBARRIER_MAX_COUNT);
122
+
123
+ #if (__CUDA_ARCH__ >= 900)
124
+ const uint32_t init_count = expected_count;
125
+ #else
126
+ const uint32_t init_count = (expected_count << 15) + expected_count;
127
+ #endif
128
+
129
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(&barrier->barrier, init_count);
130
+ }
131
+
132
+ _CUDA_AWBARRIER_QUALIFIER
133
+ void inval(awbarrier* barrier)
134
+ {
135
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
136
+
137
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(&barrier->barrier);
138
+ }
139
+
140
+ _CUDA_AWBARRIER_QUALIFIER
141
+ awbarrier::arrival_token awbarrier::arrive()
142
+ {
143
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
144
+
145
+ #if (__CUDA_ARCH__ < 900)
146
+ const uint32_t arrive_count = 1 << 15;
147
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<false>(&this->barrier, arrive_count);
148
+ (void)
149
+ #else
150
+ const uint64_t token =
151
+ #endif
152
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(&this->barrier);
153
+
154
+ return arrival_token(token);
155
+ }
156
+
157
+ _CUDA_AWBARRIER_QUALIFIER
158
+ awbarrier::arrival_token awbarrier::arrive_and_drop()
159
+ {
160
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
161
+
162
+ #if (__CUDA_ARCH__ < 900)
163
+ const uint32_t arrive_count = 1 << 15;
164
+ const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<true>(&this->barrier, arrive_count);
165
+ (void)
166
+ #else
167
+ const uint64_t token =
168
+ #endif
169
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(&this->barrier);
170
+
171
+ return arrival_token(token);
172
+ }
173
+
174
+ _CUDA_AWBARRIER_QUALIFIER
175
+ bool awbarrier::timed_wait(arrival_token token, uint32_t hint_cycles)
176
+ {
177
+ constexpr uint64_t max_busy_wait_cycles = 1024;
178
+ constexpr uint32_t max_sleep_ns = 1 << 20;
179
+
180
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
181
+
182
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
183
+ return true;
184
+ }
185
+
186
+ uint64_t start_cycles = clock64();
187
+ uint64_t elapsed_cycles = 0;
188
+ uint32_t sleep_ns = 32;
189
+ while (elapsed_cycles < hint_cycles) {
190
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
191
+ return true;
192
+ }
193
+
194
+ if (elapsed_cycles > max_busy_wait_cycles) {
195
+ __nanosleep(sleep_ns);
196
+ if (sleep_ns < max_sleep_ns) {
197
+ sleep_ns *= 2;
198
+ }
199
+ }
200
+
201
+ elapsed_cycles = clock64() - start_cycles;
202
+ }
203
+
204
+ return false;
205
+ }
206
+
207
+ _CUDA_AWBARRIER_QUALIFIER
208
+ bool awbarrier::timed_wait_parity(bool phase, uint32_t hint_cycles)
209
+ {
210
+ constexpr uint64_t max_busy_wait_cycles = 1024;
211
+ constexpr uint32_t max_sleep_ns = 1 << 20;
212
+
213
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
214
+
215
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
216
+ return true;
217
+ }
218
+
219
+ uint64_t start_cycles = clock64();
220
+ uint64_t elapsed_cycles = 0;
221
+ uint32_t sleep_ns = 32;
222
+ while (elapsed_cycles < hint_cycles) {
223
+ if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(&this->barrier, phase)) {
224
+ return true;
225
+ }
226
+
227
+ if (elapsed_cycles > max_busy_wait_cycles) {
228
+ __nanosleep(sleep_ns);
229
+ if (sleep_ns < max_sleep_ns) {
230
+ sleep_ns *= 2;
231
+ }
232
+ }
233
+
234
+ elapsed_cycles = clock64() - start_cycles;
235
+ }
236
+
237
+ return false;
238
+ }
239
+
240
+ _CUDA_AWBARRIER_QUALIFIER
241
+ bool awbarrier::try_wait(arrival_token token, uint32_t maxSleepNanosec)
242
+ {
243
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
244
+
245
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(&this->barrier, token.token, maxSleepNanosec);
246
+ }
247
+
248
+ _CUDA_AWBARRIER_QUALIFIER
249
+ bool awbarrier::try_wait_parity(bool phase, uint32_t maxSleepNanosec)
250
+ {
251
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
252
+
253
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(&this->barrier, phase, maxSleepNanosec);
254
+ }
255
+
256
+ _CUDA_AWBARRIER_QUALIFIER
257
+ void awbarrier::wait(arrival_token token)
258
+ {
259
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
260
+
261
+ while (!timed_wait(token, ~0u));
262
+ }
263
+
264
+ _CUDA_AWBARRIER_QUALIFIER
265
+ void awbarrier::arrive_and_wait()
266
+ {
267
+ _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
268
+
269
+ this->wait(this->arrive());
270
+ }
271
+
272
+ _CUDA_AWBARRIER_QUALIFIER __host__
273
+ constexpr uint32_t awbarrier::max()
274
+ {
275
+ return _CUDA_AWBARRIER_MAX_COUNT;
276
+ }
277
+
278
+ _CUDA_AWBARRIER_END_NAMESPACE
279
+
280
+ #endif /* !_CUDA_AWBARRIER_H_ */
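
The awbarrier class above is used from device code: one thread initializes the shared-memory barrier with the expected arrival count, every participating thread arrives, and each thread later waits on its own token. The kernel below is an illustrative sketch rather than an example from this header; it assumes an SM 7.0+ target with -std=c++11, as required above, and a launch of exactly 256 threads per block.

#include <cuda_awbarrier.h>

using namespace nvcuda::experimental;

__global__ void stage_and_rotate(float *data)
{
    __shared__ awbarrier bar;
    __shared__ float staging[256];
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

    if (threadIdx.x == 0) {
        init(&bar, blockDim.x);          /* expected_count = all threads in the block */
    }
    __syncthreads();                     /* make the initialized barrier visible */

    staging[threadIdx.x] = data[i] * 2.0f;

    awbarrier::arrival_token token = bar.arrive();  /* signal that this thread's store is issued */
    /* ... independent work that does not touch staging[] could overlap here ... */
    bar.wait(token);                                /* every thread has arrived for this phase */

    data[i] = staging[(threadIdx.x + 1) % blockDim.x];
}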
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_helpers.h ADDED
@@ -0,0 +1,365 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_HELPERS_H_
51
+ #define _CUDA_AWBARRIER_HELPERS_H_
52
+
53
+ #define _CUDA_AWBARRIER_NAMESPACE nvcuda::experimental
54
+ #define _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace nvcuda { namespace experimental {
55
+ #define _CUDA_AWBARRIER_END_NAMESPACE } }
56
+
57
+ #define _CUDA_AWBARRIER_INTERNAL_NAMESPACE _CUDA_AWBARRIER_NAMESPACE::__awbarrier_internal
58
+ #define _CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE _CUDA_AWBARRIER_BEGIN_NAMESPACE namespace __awbarrier_internal {
59
+ #define _CUDA_AWBARRIER_END_INTERNAL_NAMESPACE } _CUDA_AWBARRIER_END_NAMESPACE
60
+
61
+ # if !defined(_CUDA_AWBARRIER_QUALIFIER)
62
+ # define _CUDA_AWBARRIER_QUALIFIER inline __device__
63
+ # endif
64
+ # if !defined(_CUDA_AWBARRIER_STATIC_QUALIFIER)
65
+ # define _CUDA_AWBARRIER_STATIC_QUALIFIER static inline __device__
66
+ #endif
67
+
68
+ #if defined(__CUDA_ARCH__)
69
+ #if (__CUDA_ARCH__ >= 900)
70
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_90
71
+ #elif (__CUDA_ARCH__ >= 800)
72
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_80
73
+ #elif (__CUDA_ARCH__ >= 700)
74
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70
75
+ #endif
76
+ #else
77
+ # define _CUDA_AWBARRIER_SM_TARGET _CUDA_AWBARRIER_SM_70
78
+ #endif
79
+
80
+ #define _CUDA_AWBARRIER_MAX_COUNT ((1 << 14) - 1)
81
+
82
+ #if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900)))
83
+ # define _CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER
84
+ #endif
85
+
86
+ #if !defined(_CUDA_AWBARRIER_DEBUG)
87
+ # if defined(__CUDACC_DEBUG__)
88
+ # define _CUDA_AWBARRIER_DEBUG 1
89
+ # else
90
+ # define _CUDA_AWBARRIER_DEBUG 0
91
+ # endif
92
+ #endif
93
+
94
+ #if defined(_CUDA_AWBARRIER_DEBUG) && (_CUDA_AWBARRIER_DEBUG == 1) && !defined(NDEBUG)
95
+ # if !defined(__CUDACC_RTC__)
96
+ # include <cassert>
97
+ # endif
98
+ # define _CUDA_AWBARRIER_ASSERT(x) assert((x));
99
+ # define _CUDA_AWBARRIER_ABORT() assert(0);
100
+ #else
101
+ # define _CUDA_AWBARRIER_ASSERT(x)
102
+ # define _CUDA_AWBARRIER_ABORT() __trap();
103
+ #endif
104
+
105
+ #if defined(__CUDACC_RTC__)
106
+ typedef unsigned short uint16_t;
107
+ typedef unsigned int uint32_t;
108
+ typedef unsigned long long uint64_t;
109
+ typedef uint64_t uintptr_t;
110
+ #else
111
+ # include <stdint.h>
112
+ #endif
113
+
114
+ // implicitly provided by NVRTC
115
+ #ifndef __CUDACC_RTC__
116
+ #include <nv/target>
117
+ #endif /* !defined(__CUDACC_RTC__) */
118
+
119
+ typedef uint64_t __mbarrier_t;
120
+ typedef uint64_t __mbarrier_token_t;
121
+
122
+ _CUDA_AWBARRIER_BEGIN_INTERNAL_NAMESPACE
123
+
124
+ extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *);
125
+
126
+ union AWBarrier {
127
+ struct {
128
+ uint32_t expected;
129
+ uint32_t pending;
130
+ } split;
131
+ uint64_t raw;
132
+ };
133
+
134
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
135
+ void awbarrier_init(uint64_t* barrier, uint32_t expected_count) {
136
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
137
+ _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count < (1 << 29));
138
+
139
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
140
+ asm volatile ("mbarrier.init.shared.b64 [%0], %1;"
141
+ :
142
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(expected_count)
143
+ : "memory");
144
+ return;
145
+ )
146
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
147
+ AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
148
+
149
+ awbarrier->split.expected = 0x40000000 - expected_count;
150
+ awbarrier->split.pending = 0x80000000 - expected_count;
151
+ return;
152
+ )
153
+ }
154
+
155
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
156
+ void awbarrier_inval(uint64_t* barrier) {
157
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
158
+
159
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
160
+ asm volatile ("mbarrier.inval.shared.b64 [%0];"
161
+ :
162
+ : "r"(__nvvm_get_smem_pointer(barrier))
163
+ : "memory");
164
+ return;
165
+ )
166
+ return;
167
+ }
168
+
169
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
170
+ uint32_t awbarrier_token_pending_count(uint64_t token) {
171
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
172
+ uint32_t __pending_count;
173
+
174
+ asm ("mbarrier.pending_count.b64 %0, %1;"
175
+ : "=r"(__pending_count)
176
+ : "l"(token));
177
+ return __pending_count;
178
+ )
179
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
180
+ const uint32_t pending = token >> 32;
181
+ return 0x80000000 - (pending & 0x7fffffff);
182
+ )
183
+ }
184
+
185
+ template<bool _Drop>
186
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
187
+ uint64_t awbarrier_arrive_drop(uint64_t* barrier) {
188
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
189
+
190
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
191
+ uint64_t token;
192
+
193
+ if (_Drop) {
194
+ asm volatile ("mbarrier.arrive_drop.shared.b64 %0, [%1];"
195
+ : "=l"(token)
196
+ : "r"(__nvvm_get_smem_pointer(barrier))
197
+ : "memory");
198
+ } else {
199
+ asm volatile ("mbarrier.arrive.shared.b64 %0, [%1];"
200
+ : "=l"(token)
201
+ : "r"(__nvvm_get_smem_pointer(barrier))
202
+ : "memory");
203
+ }
204
+
205
+ return token;
206
+ )
207
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
208
+ AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
209
+
210
+ while ((*reinterpret_cast<volatile uint32_t*>(&awbarrier->split.pending) & 0x7fffffff) == 0);
211
+
212
+ if (_Drop) {
213
+ (void)atomicAdd_block(&awbarrier->split.expected, 1);
214
+ }
215
+
216
+ __threadfence_block();
217
+
218
+ const uint32_t old_pending = atomicAdd_block(&awbarrier->split.pending, 1);
219
+ const uint32_t new_pending = old_pending + 1;
220
+ const bool reset = (old_pending ^ new_pending) & 0x80000000;
221
+
222
+ if (reset) {
223
+ __threadfence_block();
224
+
225
+ uint32_t new_expected = *reinterpret_cast<volatile uint32_t*>(&awbarrier->split.expected);
226
+ new_expected &= ~0x40000000;
227
+ if (new_expected & 0x20000000) {
228
+ new_expected |= 0x40000000;
229
+ }
230
+ atomicAdd_block(&awbarrier->split.pending, new_expected);
231
+ }
232
+
233
+ return static_cast<uint64_t>(old_pending) << 32;
234
+ )
235
+ }
236
+
237
+ template<bool _Drop>
238
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
239
+ uint64_t awbarrier_arrive_drop_no_complete(uint64_t* barrier, uint32_t count) {
240
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
241
+ _CUDA_AWBARRIER_ASSERT(count > 0 && count < (1 << 29));
242
+
243
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
244
+ uint64_t token;
245
+
246
+ if (_Drop) {
247
+ asm volatile ("mbarrier.arrive_drop.noComplete.shared.b64 %0, [%1], %2;"
248
+ : "=l"(token)
249
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count)
250
+ : "memory");
251
+ } else {
252
+ asm volatile ("mbarrier.arrive.noComplete.shared.b64 %0, [%1], %2;"
253
+ : "=l"(token)
254
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(count)
255
+ : "memory");
256
+ }
257
+
258
+ return token;
259
+ )
260
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
261
+ AWBarrier* awbarrier = reinterpret_cast<AWBarrier*>(barrier);
262
+
263
+ while ((*reinterpret_cast<volatile uint32_t*>(&awbarrier->split.pending) & 0x7fffffff) == 0);
264
+
265
+ if (_Drop) {
266
+ (void)atomicAdd_block(&awbarrier->split.expected, count);
267
+ }
268
+
269
+ return static_cast<uint64_t>(atomicAdd_block(&awbarrier->split.pending, count)) << 32;
270
+ )
271
+ }
272
+
273
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
274
+ bool awbarrier_test_wait(uint64_t* barrier, uint64_t token) {
275
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
276
+
277
+ NV_IF_TARGET(NV_PROVIDES_SM_80,
278
+ uint32_t __wait_complete;
279
+
280
+ asm volatile ("{"
281
+ " .reg .pred %%p;"
282
+ " mbarrier.test_wait.shared.b64 %%p, [%1], %2;"
283
+ " selp.b32 %0, 1, 0, %%p;"
284
+ "}"
285
+ : "=r"(__wait_complete)
286
+ : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token)
287
+ : "memory");
288
+ return bool(__wait_complete);
289
+ )
290
+ NV_IF_TARGET(NV_PROVIDES_SM_70,
291
+ volatile AWBarrier* awbarrier = reinterpret_cast<volatile AWBarrier*>(barrier);
292
+
293
+ return ((token >> 32) ^ awbarrier->split.pending) & 0x80000000;
294
+ )
295
+ }
296
+
297
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
298
+ bool awbarrier_test_wait_parity(uint64_t* barrier, bool phase_parity) {
299
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
300
+
301
+ NV_IF_TARGET(NV_PROVIDES_SM_90,
302
+ uint32_t __wait_complete = 0;
303
+
304
+ asm volatile ("{"
305
+ ".reg .pred %%p;"
306
+ "mbarrier.test_wait.parity.shared.b64 %%p, [%1], %2;"
307
+ "selp.b32 %0, 1, 0, %%p;"
308
+ "}"
309
+ : "=r"(__wait_complete)
310
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast<uint32_t>(phase_parity))
311
+ : "memory");
312
+
313
+ return __wait_complete;
314
+ )
315
+ _CUDA_AWBARRIER_ABORT()
316
+ return false;
317
+ }
318
+
319
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
320
+ bool awbarrier_try_wait(uint64_t* barrier, uint64_t token, uint32_t max_sleep_nanosec) {
321
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
322
+
323
+ NV_IF_TARGET(NV_PROVIDES_SM_90,
324
+ uint32_t __wait_complete = 0;
325
+
326
+ asm volatile ("{\n\t"
327
+ ".reg .pred p;\n\t"
328
+ "mbarrier.try_wait.shared.b64 p, [%1], %2, %3;\n\t"
329
+ "selp.b32 %0, 1, 0, p;\n\t"
330
+ "}"
331
+ : "=r"(__wait_complete)
332
+ : "r"(__nvvm_get_smem_pointer(barrier)), "l"(token), "r"(max_sleep_nanosec)
333
+ : "memory");
334
+
335
+ return __wait_complete;
336
+ )
337
+ _CUDA_AWBARRIER_ABORT()
338
+ return false;
339
+ }
340
+
341
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
342
+ bool awbarrier_try_wait_parity(uint64_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
343
+ _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
344
+
345
+ NV_IF_TARGET(NV_PROVIDES_SM_90,
346
+ uint32_t __wait_complete = 0;
347
+
348
+ asm volatile ("{\n\t"
349
+ ".reg .pred p;\n\t"
350
+ "mbarrier.try_wait.parity.shared.b64 p, [%1], %2, %3;\n\t"
351
+ "selp.b32 %0, 1, 0, p;\n\t"
352
+ "}"
353
+ : "=r"(__wait_complete)
354
+ : "r"(__nvvm_get_smem_pointer(barrier)), "r"(static_cast<uint32_t>(phase_parity)), "r"(max_sleep_nanosec)
355
+ : "memory");
356
+
357
+ return __wait_complete;
358
+ )
359
+ _CUDA_AWBARRIER_ABORT()
360
+ return false;
361
+ }
362
+
363
+ _CUDA_AWBARRIER_END_INTERNAL_NAMESPACE
364
+
365
+ #endif /* !_CUDA_AWBARRIER_HELPERS_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_bf16.hpp ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_device_runtime_api.h ADDED
@@ -0,0 +1,735 @@
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_DEVICE_RUNTIME_API_H__)
51
+ #define __CUDA_DEVICE_RUNTIME_API_H__
52
+
53
+ #if defined(__CUDACC__) && !defined(__CUDACC_RTC__)
54
+ #include <stdlib.h>
55
+ #endif
56
+
57
+ /*******************************************************************************
58
+ * *
59
+ * *
60
+ * *
61
+ *******************************************************************************/
62
+
63
+ #if !defined(CUDA_FORCE_CDP1_IF_SUPPORTED) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_NVHPC_CUDA) && !(defined(_WIN32) && !defined(_WIN64))
64
+ #define __CUDA_INTERNAL_USE_CDP2
65
+ #endif
66
+
67
+ #if !defined(__CUDACC_RTC__)
68
+
69
+ #if !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__)
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif
74
+
75
+ struct cudaFuncAttributes;
76
+
77
+
78
+ #ifndef __CUDA_INTERNAL_USE_CDP2
79
+ inline __device__ cudaError_t CUDARTAPI cudaMalloc(void **p, size_t s)
80
+ {
81
+ return cudaErrorUnknown;
82
+ }
83
+
84
+ inline __device__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *p, const void *c)
85
+ {
86
+ return cudaErrorUnknown;
87
+ }
88
+
89
+ inline __device__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device)
90
+ {
91
+ return cudaErrorUnknown;
92
+ }
93
+
94
+ inline __device__ cudaError_t CUDARTAPI cudaGetDevice(int *device)
95
+ {
96
+ return cudaErrorUnknown;
97
+ }
98
+
99
+ inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize)
100
+ {
101
+ return cudaErrorUnknown;
102
+ }
103
+
104
+ inline __device__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags)
105
+ {
106
+ return cudaErrorUnknown;
107
+ }
108
+ #else // __CUDA_INTERNAL_USE_CDP2
109
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **p, size_t s)
110
+ {
111
+ return cudaErrorUnknown;
112
+ }
113
+
114
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *p, const void *c)
115
+ {
116
+ return cudaErrorUnknown;
117
+ }
118
+
119
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device)
120
+ {
121
+ return cudaErrorUnknown;
122
+ }
123
+
124
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device)
125
+ {
126
+ return cudaErrorUnknown;
127
+ }
128
+
129
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize)
130
+ {
131
+ return cudaErrorUnknown;
132
+ }
133
+
134
+ inline __device__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags)
135
+ {
136
+ return cudaErrorUnknown;
137
+ }
138
+ #endif // __CUDA_INTERNAL_USE_CDP2
139
+
140
+
141
+ #if defined(__cplusplus)
142
+ }
143
+ #endif
144
+
145
+ #endif /* !defined(__CUDACC_INTERNAL_NO_STUBS__) && !defined(__CUDACC_RDC__) && !defined(__CUDACC_EWP__) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350) && !defined(__CUDADEVRT_INTERNAL__) */
146
+
147
+ #endif /* !defined(__CUDACC_RTC__) */
148
+
149
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
150
+ # define __DEPRECATED__(msg)
151
+ #elif defined(_WIN32)
152
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
153
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
154
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
155
+ #else
156
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
157
+ #endif
158
+
159
+ #if defined(__CUDA_ARCH__) && !defined(__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING)
160
+ # define __CDPRT_DEPRECATED(func_name) __DEPRECATED__("Use of "#func_name" from device code is deprecated. Moreover, such use will cause this module to fail to load on sm_90+ devices. If calls to "#func_name" from device code cannot be removed for older devices at this time, you may guard them with __CUDA_ARCH__ macros to remove them only for sm_90+ devices, making sure to generate code for compute_90 for the macros to take effect. Note that this mitigation will no longer work when support for "#func_name" from device code is eventually dropped for all devices. Disable this warning with -D__CDPRT_SUPPRESS_SYNC_DEPRECATION_WARNING.")
161
+ #else
162
+ # define __CDPRT_DEPRECATED(func_name)
163
+ #endif
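The long `__CDPRT_DEPRECATED` message above spells out a mitigation for code that still calls `cudaDeviceSynchronize()` from device code: guard the call so it only exists for pre-sm_90 targets. A sketch of that guard, assuming compilation with `-rdc=true`, `-DCUDA_FORCE_CDP1_IF_SUPPORTED` (so the CDP1 declaration further down in this header is visible) and a `-gencode` entry for `compute_90`; `child_kernel`/`parent_kernel` are hypothetical:

```cuda
__global__ void child_kernel(int* out) {
    if (threadIdx.x == 0) *out = 42;
}

__global__ void parent_kernel(int* out) {
    child_kernel<<<1, 32>>>(out);            // child grid launched via dynamic parallelism
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 900)
    cudaDeviceSynchronize();                 // deprecated from device code; removed on sm_90+
#endif
}
```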
164
+
165
+ #if defined(__cplusplus) && defined(__CUDACC__) /* Visible to nvcc front-end only */
166
+ #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350) // Visible to SM>=3.5 and "__host__ __device__" only
167
+
168
+ #include "driver_types.h"
169
+ #include "crt/host_defines.h"
170
+
171
+ #define cudaStreamGraphTailLaunch (cudaStream_t)0x0100000000000000
172
+ #define cudaStreamGraphFireAndForget (cudaStream_t)0x0200000000000000
173
+
174
+ #ifdef __CUDA_INTERNAL_USE_CDP2
175
+ #define cudaStreamTailLaunch ((cudaStream_t)0x3) /**< Per-grid stream with tail launch semantics. Only applicable when used with CUDA Dynamic Parallelism. */
176
+ #define cudaStreamFireAndForget ((cudaStream_t)0x4) /**< Per-grid stream with fire-and-forget synchronization behavior. Only applicable when used with CUDA Dynamic Parallelism. */
177
+ #endif
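A minimal sketch of how the two named streams defined above are used from device code under CDP2 (`-rdc=true` assumed; `child_work` is a hypothetical kernel): work launched into `cudaStreamFireAndForget` begins independently of the parent grid's completion, while work launched into `cudaStreamTailLaunch` begins only after the parent grid has finished.

```cuda
__global__ void child_work(float* buf);

__global__ void parent(float* buf) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        // May run concurrently with the remainder of this (parent) grid.
        child_work<<<32, 128, 0, cudaStreamFireAndForget>>>(buf);

        // Runs only after every thread of the parent grid has exited.
        child_work<<<32, 128, 0, cudaStreamTailLaunch>>>(buf);
    }
}
```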
178
+
179
+ extern "C"
180
+ {
181
+
182
+ // Symbols beginning with __cudaCDP* should not be used outside
183
+ // this header file. Instead, compile with -DCUDA_FORCE_CDP1_IF_SUPPORTED if
184
+ // CDP1 support is required.
185
+
186
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaDeviceSynchronizeDeprecationAvoidance(void);
187
+
188
+ #ifndef __CUDA_INTERNAL_USE_CDP2
189
+ //// CDP1 endpoints
190
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device);
191
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit);
192
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig);
193
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig);
194
+ #if (__CUDA_ARCH__ < 900) && (defined(CUDA_FORCE_CDP1_IF_SUPPORTED) || (defined(_WIN32) && !defined(_WIN64)))
195
+ // cudaDeviceSynchronize is removed on sm_90+
196
+ extern __device__ __cudart_builtin__ __CDPRT_DEPRECATED(cudaDeviceSynchronize) cudaError_t CUDARTAPI cudaDeviceSynchronize(void);
197
+ #endif
198
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void);
199
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void);
200
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error);
201
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error);
202
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count);
203
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device);
204
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags);
205
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream);
206
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
207
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
208
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags);
209
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream);
210
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream);
211
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
212
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
213
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event);
214
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func);
215
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr);
216
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size);
217
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
218
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
219
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
220
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
221
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
222
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
223
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream);
224
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream);
225
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
226
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
227
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
228
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
229
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion);
230
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize);
231
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags);
232
+ #endif // __CUDA_INTERNAL_USE_CDP2
233
+
234
+ //// CDP2 endpoints
235
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device);
236
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetLimit(size_t *pValue, enum cudaLimit limit);
237
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig);
238
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2DeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig);
239
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetLastError(void);
240
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2PeekAtLastError(void);
241
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorString(cudaError_t error);
242
+ extern __device__ __cudart_builtin__ const char* CUDARTAPI __cudaCDP2GetErrorName(cudaError_t error);
243
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDeviceCount(int *count);
244
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2GetDevice(int *device);
245
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags);
246
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamDestroy(cudaStream_t stream);
247
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
248
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2StreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags);
249
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventCreateWithFlags(cudaEvent_t *event, unsigned int flags);
250
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord(cudaEvent_t event, cudaStream_t stream);
251
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecord_ptsz(cudaEvent_t event, cudaStream_t stream);
252
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
253
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags);
254
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2EventDestroy(cudaEvent_t event);
255
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2FuncGetAttributes(struct cudaFuncAttributes *attr, const void *func);
256
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Free(void *devPtr);
257
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Malloc(void **devPtr, size_t size);
258
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
259
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream);
260
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
261
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream);
262
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
263
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream);
264
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream);
265
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2MemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream);
266
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
267
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream);
268
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
269
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2Memset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream);
270
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2RuntimeGetVersion(int *runtimeVersion);
271
+ extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBuffer(size_t alignment, size_t size);
272
+ extern __device__ __cudart_builtin__ void * CUDARTAPI __cudaCDP2GetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize);
273
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
274
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream);
275
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
276
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2LaunchDeviceV2(void *parameterBuffer, cudaStream_t stream);
277
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize);
278
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags);
279
+
280
+
281
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch(cudaGraphExec_t graphExec, cudaStream_t stream);
282
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
283
+ static inline __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGraphLaunch_ptsz(cudaGraphExec_t graphExec, cudaStream_t stream)
284
+ {
285
+ if (stream == 0) {
286
+ stream = cudaStreamPerThread;
287
+ }
288
+ return cudaGraphLaunch(graphExec, stream);
289
+ }
290
+ #endif
291
+
292
+ /**
293
+ * \ingroup CUDART_GRAPH
294
+ * \brief Get the currently running device graph id.
295
+ *
296
+ * Get the currently running device graph id.
297
+ * \return Returns the current device graph id, 0 if the call is outside of a device graph.
298
+ * \sa cudaLaunchDevice
299
+ */
300
+ static inline __device__ __cudart_builtin__ cudaGraphExec_t CUDARTAPI cudaGetCurrentGraphExec(void)
301
+ {
302
+ unsigned long long current_graph_exec;
303
+ asm ("mov.u64 %0, %%current_graph_exec;" : "=l"(current_graph_exec));
304
+ return (cudaGraphExec_t)current_graph_exec;
305
+ }
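`cudaGraphLaunch` is declared above for device code, and together with `cudaGetCurrentGraphExec` and the `cudaStreamGraphTailLaunch` handle it lets a device graph re-enqueue itself. A rough sketch, assuming the enclosing graph was instantiated and uploaded for device launch (e.g. with `cudaGraphInstantiateFlagDeviceLaunch`, which is declared elsewhere and is an assumption here):

```cuda
// Hypothetical tail node of a device-launched graph: decrement an iteration counter
// and, while iterations remain, relaunch the currently running graph after this
// launch completes. cudaGetCurrentGraphExec returns 0 outside of a device graph.
__global__ void relaunch_if_not_done(int* iterations_left) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        int prev = atomicSub(iterations_left, 1);
        if (prev > 1) {                              // more iterations still pending
            cudaGraphExec_t self = cudaGetCurrentGraphExec();
            if (self != (cudaGraphExec_t)0) {
                cudaGraphLaunch(self, cudaStreamGraphTailLaunch);
            }
        }
    }
}
```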
306
+
307
+ /**
308
+ * \ingroup CUDART_EXECUTION
309
+ * \brief Programmatic dependency trigger
310
+ *
311
+ * This device function ensures the programmatic launch completion edges /
312
+ * events are fulfilled. See
313
+ * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization
314
+ * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more
315
+ * information. The event / edge kick off only happens when every CTA
316
+ * in the grid has either exited or called this function at least once,
317
+ * otherwise the kick off happens automatically after all warps finish
318
+ * execution but before the grid completes. The kick off only enables
319
+ * scheduling of the secondary kernel. It provides no memory visibility
320
+ * guarantee itself. The user could enforce memory visibility by inserting a
321
+ * memory fence of the correct scope.
322
+ */
323
+ static inline __device__ __cudart_builtin__ void CUDARTAPI cudaTriggerProgrammaticLaunchCompletion(void)
324
+ {
325
+ asm volatile("griddepcontrol.launch_dependents;":::);
326
+ }
327
+
328
+ /**
329
+ * \ingroup CUDART_EXECUTION
330
+ * \brief Programmatic grid dependency synchronization
331
+ *
332
+ * This device function will block the thread until all direct grid
333
+ * dependencies have completed. This API is intended to be used in conjunction with
334
+ * programmatic launch events / dependencies. See
335
+ * ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticStreamSerialization
336
+ * and ::cudaLaunchAttributeID::cudaLaunchAttributeProgrammaticEvent for more
337
+ * information.
338
+ */
339
+ static inline __device__ __cudart_builtin__ void CUDARTAPI cudaGridDependencySynchronize(void)
340
+ {
341
+ asm volatile("griddepcontrol.wait;":::"memory");
342
+ }
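The two device functions above form the device half of programmatic dependent launch: the primary kernel calls `cudaTriggerProgrammaticLaunchCompletion()` once its dependents may be scheduled, and the secondary kernel calls `cudaGridDependencySynchronize()` before consuming the primary's results. A sketch of both halves; the host-side attribute plumbing (`cudaLaunchKernelEx`, `cudaLaunchConfig_t`, `cudaLaunchAttributeProgrammaticStreamSerialization`) follows the runtime's extended launch API, which is declared elsewhere and is an assumption here, as are the kernel and function names:

```cuda
#include <cuda_runtime.h>

__global__ void primary(float* buf) {
    // ... produce buf ...
    cudaTriggerProgrammaticLaunchCompletion();   // dependents may now be scheduled
    // ... unrelated tail work can continue here ...
}

__global__ void secondary(const float* buf, float* out) {
    // Preamble work that does not depend on the primary grid could go here.
    cudaGridDependencySynchronize();             // wait until the primary grid has completed
    out[threadIdx.x] = buf[threadIdx.x];
}

// Host side: opt the secondary launch into programmatic stream serialization.
void launch_pair(float* buf, float* out, cudaStream_t stream) {
    primary<<<128, 256, 0, stream>>>(buf);

    cudaLaunchAttribute attr{};
    attr.id = cudaLaunchAttributeProgrammaticStreamSerialization;
    attr.val.programmaticStreamSerializationAllowed = 1;

    cudaLaunchConfig_t cfg{};
    cfg.gridDim = 128;
    cfg.blockDim = 256;
    cfg.stream = stream;
    cfg.attrs = &attr;
    cfg.numAttrs = 1;

    cudaLaunchKernelEx(&cfg, secondary, (const float*)buf, out);
}
```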
343
+
344
+
345
+ //// CG API
346
+ extern __device__ __cudart_builtin__ unsigned long long CUDARTAPI cudaCGGetIntrinsicHandle(enum cudaCGScope scope);
347
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronize(unsigned long long handle, unsigned int flags);
348
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGSynchronizeGrid(unsigned long long handle, unsigned int flags);
349
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetSize(unsigned int *numThreads, unsigned int *numGrids, unsigned long long handle);
350
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaCGGetRank(unsigned int *threadRank, unsigned int *gridRank, unsigned long long handle);
351
+
352
+
353
+ //// CDP API
354
+
355
+ #ifdef __CUDA_ARCH__
356
+
357
+ #ifdef __CUDA_INTERNAL_USE_CDP2
358
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetAttribute(int *value, enum cudaDeviceAttr attr, int device)
359
+ {
360
+ return __cudaCDP2DeviceGetAttribute(value, attr, device);
361
+ }
362
+
363
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetLimit(size_t *pValue, enum cudaLimit limit)
364
+ {
365
+ return __cudaCDP2DeviceGetLimit(pValue, limit);
366
+ }
367
+
368
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetCacheConfig(enum cudaFuncCache *pCacheConfig)
369
+ {
370
+ return __cudaCDP2DeviceGetCacheConfig(pCacheConfig);
371
+ }
372
+
373
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaDeviceGetSharedMemConfig(enum cudaSharedMemConfig *pConfig)
374
+ {
375
+ return __cudaCDP2DeviceGetSharedMemConfig(pConfig);
376
+ }
377
+
378
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetLastError(void)
379
+ {
380
+ return __cudaCDP2GetLastError();
381
+ }
382
+
383
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaPeekAtLastError(void)
384
+ {
385
+ return __cudaCDP2PeekAtLastError();
386
+ }
387
+
388
+ static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorString(cudaError_t error)
389
+ {
390
+ return __cudaCDP2GetErrorString(error);
391
+ }
392
+
393
+ static __inline__ __device__ __cudart_builtin__ const char* CUDARTAPI cudaGetErrorName(cudaError_t error)
394
+ {
395
+ return __cudaCDP2GetErrorName(error);
396
+ }
397
+
398
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDeviceCount(int *count)
399
+ {
400
+ return __cudaCDP2GetDeviceCount(count);
401
+ }
402
+
403
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaGetDevice(int *device)
404
+ {
405
+ return __cudaCDP2GetDevice(device);
406
+ }
407
+
408
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamCreateWithFlags(cudaStream_t *pStream, unsigned int flags)
409
+ {
410
+ return __cudaCDP2StreamCreateWithFlags(pStream, flags);
411
+ }
412
+
413
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamDestroy(cudaStream_t stream)
414
+ {
415
+ return __cudaCDP2StreamDestroy(stream);
416
+ }
417
+
418
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent(cudaStream_t stream, cudaEvent_t event, unsigned int flags)
419
+ {
420
+ return __cudaCDP2StreamWaitEvent(stream, event, flags);
421
+ }
422
+
423
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaStreamWaitEvent_ptsz(cudaStream_t stream, cudaEvent_t event, unsigned int flags)
424
+ {
425
+ return __cudaCDP2StreamWaitEvent_ptsz(stream, event, flags);
426
+ }
427
+
428
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventCreateWithFlags(cudaEvent_t *event, unsigned int flags)
429
+ {
430
+ return __cudaCDP2EventCreateWithFlags(event, flags);
431
+ }
432
+
433
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord(cudaEvent_t event, cudaStream_t stream)
434
+ {
435
+ return __cudaCDP2EventRecord(event, stream);
436
+ }
437
+
438
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecord_ptsz(cudaEvent_t event, cudaStream_t stream)
439
+ {
440
+ return __cudaCDP2EventRecord_ptsz(event, stream);
441
+ }
442
+
443
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags(cudaEvent_t event, cudaStream_t stream, unsigned int flags)
444
+ {
445
+ return __cudaCDP2EventRecordWithFlags(event, stream, flags);
446
+ }
447
+
448
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventRecordWithFlags_ptsz(cudaEvent_t event, cudaStream_t stream, unsigned int flags)
449
+ {
450
+ return __cudaCDP2EventRecordWithFlags_ptsz(event, stream, flags);
451
+ }
452
+
453
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaEventDestroy(cudaEvent_t event)
454
+ {
455
+ return __cudaCDP2EventDestroy(event);
456
+ }
457
+
458
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFuncGetAttributes(struct cudaFuncAttributes *attr, const void *func)
459
+ {
460
+ return __cudaCDP2FuncGetAttributes(attr, func);
461
+ }
462
+
463
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaFree(void *devPtr)
464
+ {
465
+ return __cudaCDP2Free(devPtr);
466
+ }
467
+
468
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMalloc(void **devPtr, size_t size)
469
+ {
470
+ return __cudaCDP2Malloc(devPtr, size);
471
+ }
472
+
473
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream)
474
+ {
475
+ return __cudaCDP2MemcpyAsync(dst, src, count, kind, stream);
476
+ }
477
+
478
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpyAsync_ptsz(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream)
479
+ {
480
+ return __cudaCDP2MemcpyAsync_ptsz(dst, src, count, kind, stream);
481
+ }
482
+
483
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream)
484
+ {
485
+ return __cudaCDP2Memcpy2DAsync(dst, dpitch, src, spitch, width, height, kind, stream);
486
+ }
487
+
488
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy2DAsync_ptsz(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind, cudaStream_t stream)
489
+ {
490
+ return __cudaCDP2Memcpy2DAsync_ptsz(dst, dpitch, src, spitch, width, height, kind, stream);
491
+ }
492
+
493
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync(const struct cudaMemcpy3DParms *p, cudaStream_t stream)
494
+ {
495
+ return __cudaCDP2Memcpy3DAsync(p, stream);
496
+ }
497
+
498
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemcpy3DAsync_ptsz(const struct cudaMemcpy3DParms *p, cudaStream_t stream)
499
+ {
500
+ return __cudaCDP2Memcpy3DAsync_ptsz(p, stream);
501
+ }
502
+
503
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync(void *devPtr, int value, size_t count, cudaStream_t stream)
504
+ {
505
+ return __cudaCDP2MemsetAsync(devPtr, value, count, stream);
506
+ }
507
+
508
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemsetAsync_ptsz(void *devPtr, int value, size_t count, cudaStream_t stream)
509
+ {
510
+ return __cudaCDP2MemsetAsync_ptsz(devPtr, value, count, stream);
511
+ }
512
+
513
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream)
514
+ {
515
+ return __cudaCDP2Memset2DAsync(devPtr, pitch, value, width, height, stream);
516
+ }
517
+
518
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset2DAsync_ptsz(void *devPtr, size_t pitch, int value, size_t width, size_t height, cudaStream_t stream)
519
+ {
520
+ return __cudaCDP2Memset2DAsync_ptsz(devPtr, pitch, value, width, height, stream);
521
+ }
522
+
523
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream)
524
+ {
525
+ return __cudaCDP2Memset3DAsync(pitchedDevPtr, value, extent, stream);
526
+ }
527
+
528
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaMemset3DAsync_ptsz(struct cudaPitchedPtr pitchedDevPtr, int value, struct cudaExtent extent, cudaStream_t stream)
529
+ {
530
+ return __cudaCDP2Memset3DAsync_ptsz(pitchedDevPtr, value, extent, stream);
531
+ }
532
+
533
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaRuntimeGetVersion(int *runtimeVersion)
534
+ {
535
+ return __cudaCDP2RuntimeGetVersion(runtimeVersion);
536
+ }
537
+
538
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize)
539
+ {
540
+ return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func, blockSize, dynamicSmemSize);
541
+ }
542
+
543
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, const void *func, int blockSize, size_t dynamicSmemSize, unsigned int flags)
544
+ {
545
+ return __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func, blockSize, dynamicSmemSize, flags);
546
+ }
547
+ #endif // __CUDA_INTERNAL_USE_CDP2
548
+
549
+ #endif // __CUDA_ARCH__
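The CDP2 wrappers above mean the familiar runtime names keep working in device code and simply forward to the `__cudaCDP2*` endpoints. A small sketch of device-side runtime calls, assuming `-rdc=true` and linking against `cudadevrt`; the kernel name is hypothetical:

```cuda
#include <cstdio>

__global__ void device_side_alloc(int n) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        int dev = -1;
        cudaGetDevice(&dev);                            // forwards to __cudaCDP2GetDevice

        void* scratch = nullptr;                        // allocated from the device runtime heap
        if (cudaMalloc(&scratch, n * sizeof(float)) == cudaSuccess) {
            // ... hand scratch to a child grid, etc. ...
            cudaFree(scratch);                          // forwards to __cudaCDP2Free
        }
        printf("device %d, last error: %s\n", dev, cudaGetErrorName(cudaGetLastError()));
    }
}
```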
550
+
551
+
552
+ /**
553
+ * \ingroup CUDART_EXECUTION
554
+ * \brief Obtains a parameter buffer
555
+ *
556
+ * Obtains a parameter buffer which can be filled with parameters for a kernel launch.
557
+ * Parameters passed to ::cudaLaunchDevice must be allocated via this function.
558
+ *
559
+ * This is a low level API and can only be accessed from Parallel Thread Execution (PTX).
560
+ * CUDA user code should use <<< >>> to launch kernels.
561
+ *
562
+ * \param alignment - Specifies alignment requirement of the parameter buffer
563
+ * \param size - Specifies size requirement in bytes
564
+ *
565
+ * \return
566
+ * Returns pointer to the allocated parameterBuffer
567
+ * \notefnerr
568
+ *
569
+ * \sa cudaLaunchDevice
570
+ */
571
+ #ifdef __CUDA_INTERNAL_USE_CDP2
572
+ static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size)
573
+ {
574
+ return __cudaCDP2GetParameterBuffer(alignment, size);
575
+ }
576
+ #else
577
+ extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBuffer(size_t alignment, size_t size);
578
+ #endif
579
+
580
+
581
+ /**
582
+ * \ingroup CUDART_EXECUTION
583
+ * \brief Launches a specified kernel
584
+ *
585
+ * Launches a specified kernel with the specified parameter buffer. A parameter buffer can be obtained
586
+ * by calling ::cudaGetParameterBuffer().
587
+ *
588
+ * This is a low level API and can only be accessed from Parallel Thread Execution (PTX).
589
+ * CUDA user code should use <<< >>> to launch the kernels.
590
+ *
591
+ * \param func - Pointer to the kernel to be launched
592
+ * \param parameterBuffer - Holds the parameters to the launched kernel. parameterBuffer can be NULL. (Optional)
593
+ * \param gridDimension - Specifies grid dimensions
594
+ * \param blockDimension - Specifies block dimensions
595
+ * \param sharedMemSize - Specifies size of shared memory
596
+ * \param stream - Specifies the stream to be used
597
+ *
598
+ * \return
599
+ * ::cudaSuccess, ::cudaErrorInvalidDevice, ::cudaErrorLaunchMaxDepthExceeded, ::cudaErrorInvalidConfiguration,
600
+ * ::cudaErrorStartupFailure, ::cudaErrorLaunchPendingCountExceeded, ::cudaErrorLaunchOutOfResources
601
+ * \notefnerr
602
+ * \n Please refer to Execution Configuration and Parameter Buffer Layout from the CUDA Programming
603
+ * Guide for the detailed descriptions of launch configuration and parameter layout respectively.
604
+ *
605
+ * \sa cudaGetParameterBuffer
606
+ */
607
+ #ifdef __CUDA_INTERNAL_USE_CDP2
608
+ static __inline__ __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize)
609
+ {
610
+ return __cudaCDP2GetParameterBufferV2(func, gridDimension, blockDimension, sharedMemSize);
611
+ }
612
+ #else
613
+ extern __device__ __cudart_builtin__ void * CUDARTAPI cudaGetParameterBufferV2(void *func, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize);
614
+ #endif
615
+
616
+
617
+ #ifdef __CUDA_INTERNAL_USE_CDP2
618
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream)
619
+ {
620
+ return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
621
+ }
622
+
623
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream)
624
+ {
625
+ return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream);
626
+ }
627
+ #else
628
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice_ptsz(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
629
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2_ptsz(void *parameterBuffer, cudaStream_t stream);
630
+ #endif
631
+
632
+
633
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__)
634
+ // When compiling for the device and per thread default stream is enabled, add
635
+ // a static inline redirect to the per thread stream entry points.
636
+
637
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI
638
+ cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream)
639
+ {
640
+ #ifdef __CUDA_INTERNAL_USE_CDP2
641
+ return __cudaCDP2LaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
642
+ #else
643
+ return cudaLaunchDevice_ptsz(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
644
+ #endif
645
+ }
646
+
647
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI
648
+ cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream)
649
+ {
650
+ #ifdef __CUDA_INTERNAL_USE_CDP2
651
+ return __cudaCDP2LaunchDeviceV2_ptsz(parameterBuffer, stream);
652
+ #else
653
+ return cudaLaunchDeviceV2_ptsz(parameterBuffer, stream);
654
+ #endif
655
+ }
656
+ #else // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__)
657
+ #ifdef __CUDA_INTERNAL_USE_CDP2
658
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream)
659
+ {
660
+ return __cudaCDP2LaunchDevice(func, parameterBuffer, gridDimension, blockDimension, sharedMemSize, stream);
661
+ }
662
+
663
+ static __inline__ __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream)
664
+ {
665
+ return __cudaCDP2LaunchDeviceV2(parameterBuffer, stream);
666
+ }
667
+ #else // __CUDA_INTERNAL_USE_CDP2
668
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDevice(void *func, void *parameterBuffer, dim3 gridDimension, dim3 blockDimension, unsigned int sharedMemSize, cudaStream_t stream);
669
+ extern __device__ __cudart_builtin__ cudaError_t CUDARTAPI cudaLaunchDeviceV2(void *parameterBuffer, cudaStream_t stream);
670
+ #endif // __CUDA_INTERNAL_USE_CDP2
671
+ #endif // defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) && defined(__CUDA_ARCH__)
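As the documentation above notes, `cudaGetParameterBuffer`/`cudaLaunchDevice` are the low-level PTX path; user code is expected to use the `<<< >>>` syntax, which nvcc lowers to that path. A minimal sketch of such a device-side launch (hypothetical kernels; requires `-rdc=true` and sm_35+):

```cuda
__global__ void scale(float* v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

__global__ void parent(float* v, int n) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        int threads = 256;
        int blocks  = (n + threads - 1) / threads;
        scale<<<blocks, threads>>>(v, 0.5f, n);   // enqueued on this grid's default stream
        // With CDP2 there is no device-side sync; ordering against later work is
        // expressed with streams (e.g. cudaStreamTailLaunch) or host-side synchronization.
    }
}
```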
672
+
673
+
674
+ // These symbols should not be used outside of this header file.
675
+ #define __cudaCDP2DeviceGetAttribute
676
+ #define __cudaCDP2DeviceGetLimit
677
+ #define __cudaCDP2DeviceGetCacheConfig
678
+ #define __cudaCDP2DeviceGetSharedMemConfig
679
+ #define __cudaCDP2GetLastError
680
+ #define __cudaCDP2PeekAtLastError
681
+ #define __cudaCDP2GetErrorString
682
+ #define __cudaCDP2GetErrorName
683
+ #define __cudaCDP2GetDeviceCount
684
+ #define __cudaCDP2GetDevice
685
+ #define __cudaCDP2StreamCreateWithFlags
686
+ #define __cudaCDP2StreamDestroy
687
+ #define __cudaCDP2StreamWaitEvent
688
+ #define __cudaCDP2StreamWaitEvent_ptsz
689
+ #define __cudaCDP2EventCreateWithFlags
690
+ #define __cudaCDP2EventRecord
691
+ #define __cudaCDP2EventRecord_ptsz
692
+ #define __cudaCDP2EventRecordWithFlags
693
+ #define __cudaCDP2EventRecordWithFlags_ptsz
694
+ #define __cudaCDP2EventDestroy
695
+ #define __cudaCDP2FuncGetAttributes
696
+ #define __cudaCDP2Free
697
+ #define __cudaCDP2Malloc
698
+ #define __cudaCDP2MemcpyAsync
699
+ #define __cudaCDP2MemcpyAsync_ptsz
700
+ #define __cudaCDP2Memcpy2DAsync
701
+ #define __cudaCDP2Memcpy2DAsync_ptsz
702
+ #define __cudaCDP2Memcpy3DAsync
703
+ #define __cudaCDP2Memcpy3DAsync_ptsz
704
+ #define __cudaCDP2MemsetAsync
705
+ #define __cudaCDP2MemsetAsync_ptsz
706
+ #define __cudaCDP2Memset2DAsync
707
+ #define __cudaCDP2Memset2DAsync_ptsz
708
+ #define __cudaCDP2Memset3DAsync
709
+ #define __cudaCDP2Memset3DAsync_ptsz
710
+ #define __cudaCDP2RuntimeGetVersion
711
+ #define __cudaCDP2GetParameterBuffer
712
+ #define __cudaCDP2GetParameterBufferV2
713
+ #define __cudaCDP2LaunchDevice_ptsz
714
+ #define __cudaCDP2LaunchDeviceV2_ptsz
715
+ #define __cudaCDP2LaunchDevice
716
+ #define __cudaCDP2LaunchDeviceV2
717
+ #define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessor
718
+ #define __cudaCDP2OccupancyMaxActiveBlocksPerMultiprocessorWithFlags
719
+
720
+ }
721
+
722
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaMalloc(T **devPtr, size_t size);
723
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaFuncGetAttributes(struct cudaFuncAttributes *attr, T *entry);
724
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize);
725
+ template <typename T> static __inline__ __device__ __cudart_builtin__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, T func, int blockSize, size_t dynamicSmemSize, unsigned int flags);
726
+
727
+
728
+ #endif // !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 350)
729
+ #endif /* defined(__cplusplus) && defined(__CUDACC__) */
730
+
731
+ #undef __DEPRECATED__
732
+ #undef __CDPRT_DEPRECATED
733
+ #undef __CUDA_INTERNAL_USE_CDP2
734
+
735
+ #endif /* !__CUDA_DEVICE_RUNTIME_API_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h ADDED
@@ -0,0 +1,642 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_EGL_INTEROP_H__)
51
+ #define __CUDA_EGL_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+ #include "cuda_runtime.h"
55
+ #include "cudart_platform.h"
56
+ #include "EGL/egl.h"
57
+ #include "EGL/eglext.h"
58
+
59
+ #if defined(__cplusplus)
60
+ extern "C" {
61
+ #endif /* __cplusplus */
62
+
63
+ /**
64
+ * \addtogroup CUDART_TYPES
65
+ * @{
66
+ */
67
+
68
+ /**
69
+ * Maximum number of planes per frame
70
+ */
71
+ #define CUDA_EGL_MAX_PLANES 3
72
+
73
+ /**
74
+ * CUDA EglFrame type - array or pointer
75
+ */
76
+ typedef enum cudaEglFrameType_enum
77
+ {
78
+ cudaEglFrameTypeArray = 0, /**< Frame type CUDA array */
79
+ cudaEglFrameTypePitch = 1, /**< Frame type CUDA pointer */
80
+ } cudaEglFrameType;
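A `cudaEglFrame` exposes its planes either as CUDA arrays or as pitched device pointers, and consumers branch on the `cudaEglFrameType` above to pick the right copy path. A sketch under these assumptions: the frame was obtained elsewhere (e.g. via the EGL stream / graphics-resource APIs declared later in this header), plane 0's extent is supplied by the caller, and the `frame.frame.pArray` / `frame.frame.pPitch` field names follow the `cudaEglFrame` struct defined further down in this file:

```cuda
#include <cuda_egl_interop.h>

cudaError_t copy_plane0(const cudaEglFrame& frame, void* dst, size_t dstPitch,
                        size_t widthInBytes, size_t heightInRows) {
    if (frame.frameType == cudaEglFrameTypeArray) {
        // Planes are CUDA arrays.
        return cudaMemcpy2DFromArray(dst, dstPitch, frame.frame.pArray[0],
                                     0, 0, widthInBytes, heightInRows,
                                     cudaMemcpyDeviceToDevice);
    } else {
        // cudaEglFrameTypePitch: planes are ordinary pitched device pointers.
        const cudaPitchedPtr& p = frame.frame.pPitch[0];
        return cudaMemcpy2D(dst, dstPitch, p.ptr, p.pitch,
                            widthInBytes, heightInRows, cudaMemcpyDeviceToDevice);
    }
}
```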
81
+
82
+ /**
83
+ * Resource location flags- sysmem or vidmem
84
+ *
85
+ * For CUDA context on iGPU, since video and system memory are equivalent -
86
+ * these flags will not have an effect on the execution.
87
+ *
88
+ * For CUDA context on dGPU, applications can use the flag ::cudaEglResourceLocationFlags
89
+ * to give a hint about the desired location.
90
+ *
91
+ * ::cudaEglResourceLocationSysmem - the frame data is made resident on the system memory
92
+ * to be accessed by CUDA.
93
+ *
94
+ * ::cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated
95
+ * video memory to be accessed by CUDA.
96
+ *
97
+ * There may be an additional latency due to new allocation and data migration,
98
+ * if the frame is produced on a different memory.
99
+ */
100
+ typedef enum cudaEglResourceLocationFlags_enum {
101
+ cudaEglResourceLocationSysmem = 0x00, /**< Resource location sysmem */
102
+ cudaEglResourceLocationVidmem = 0x01, /**< Resource location vidmem */
103
+ } cudaEglResourceLocationFlags;
104
+
105
+ /**
106
+ * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
107
+ */
108
+ typedef enum cudaEglColorFormat_enum {
109
+ cudaEglColorFormatYUV420Planar = 0, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
110
+ cudaEglColorFormatYUV420SemiPlanar = 1, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */
111
+ cudaEglColorFormatYUV422Planar = 2, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
112
+ cudaEglColorFormatYUV422SemiPlanar = 3, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */
113
+ cudaEglColorFormatARGB = 6, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */
114
+ cudaEglColorFormatRGBA = 7, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */
115
+ cudaEglColorFormatL = 8, /**< single luminance channel in one surface. */
116
+ cudaEglColorFormatR = 9, /**< single color channel in one surface. */
117
+ cudaEglColorFormatYUV444Planar = 10, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
118
+ cudaEglColorFormatYUV444SemiPlanar = 11, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */
119
+ cudaEglColorFormatYUYV422 = 12, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */
120
+ cudaEglColorFormatUYVY422 = 13, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */
121
+ cudaEglColorFormatABGR = 14, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */
122
+ cudaEglColorFormatBGRA = 15, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */
123
+ cudaEglColorFormatA = 16, /**< Alpha color format - one channel in one surface. */
124
+ cudaEglColorFormatRG = 17, /**< R/G color format - two channels in one surface with GR byte ordering */
125
+ cudaEglColorFormatAYUV = 18, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */
126
+ cudaEglColorFormatYVU444SemiPlanar = 19, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
127
+ cudaEglColorFormatYVU422SemiPlanar = 20, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
128
+ cudaEglColorFormatYVU420SemiPlanar = 21, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
129
+ cudaEglColorFormatY10V10U10_444SemiPlanar = 22, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
130
+ cudaEglColorFormatY10V10U10_420SemiPlanar = 23, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
131
+ cudaEglColorFormatY12V12U12_444SemiPlanar = 24, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
132
+ cudaEglColorFormatY12V12U12_420SemiPlanar = 25, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
133
+ cudaEglColorFormatVYUY_ER = 26, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */
134
+ cudaEglColorFormatUYVY_ER = 27, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */
135
+ cudaEglColorFormatYUYV_ER = 28, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */
136
+ cudaEglColorFormatYVYU_ER = 29, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */
137
+ cudaEglColorFormatYUVA_ER = 31, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */
138
+ cudaEglColorFormatAYUV_ER = 32, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */
139
+ cudaEglColorFormatYUV444Planar_ER = 33, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */
140
+ cudaEglColorFormatYUV422Planar_ER = 34, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
141
+ cudaEglColorFormatYUV420Planar_ER = 35, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
142
+ cudaEglColorFormatYUV444SemiPlanar_ER = 36, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */
143
+ cudaEglColorFormatYUV422SemiPlanar_ER = 37, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
144
+ cudaEglColorFormatYUV420SemiPlanar_ER = 38, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
145
+ cudaEglColorFormatYVU444Planar_ER = 39, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */
146
+ cudaEglColorFormatYVU422Planar_ER = 40, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
147
+ cudaEglColorFormatYVU420Planar_ER = 41, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
148
+ cudaEglColorFormatYVU444SemiPlanar_ER = 42, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
149
+ cudaEglColorFormatYVU422SemiPlanar_ER = 43, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
150
+ cudaEglColorFormatYVU420SemiPlanar_ER = 44, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
151
+ cudaEglColorFormatBayerRGGB = 45, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */
152
+ cudaEglColorFormatBayerBGGR = 46, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */
153
+ cudaEglColorFormatBayerGRBG = 47, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */
154
+ cudaEglColorFormatBayerGBRG = 48, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */
155
+ cudaEglColorFormatBayer10RGGB = 49, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
156
+ cudaEglColorFormatBayer10BGGR = 50, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
157
+ cudaEglColorFormatBayer10GRBG = 51, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
158
+ cudaEglColorFormatBayer10GBRG = 52, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
159
+ cudaEglColorFormatBayer12RGGB = 53, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
160
+ cudaEglColorFormatBayer12BGGR = 54, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
161
+ cudaEglColorFormatBayer12GRBG = 55, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
162
+ cudaEglColorFormatBayer12GBRG = 56, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
163
+ cudaEglColorFormatBayer14RGGB = 57, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
164
+ cudaEglColorFormatBayer14BGGR = 58, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
165
+ cudaEglColorFormatBayer14GRBG = 59, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
166
+ cudaEglColorFormatBayer14GBRG = 60, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
167
+ cudaEglColorFormatBayer20RGGB = 61, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
168
+ cudaEglColorFormatBayer20BGGR = 62, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
169
+ cudaEglColorFormatBayer20GRBG = 63, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
170
+ cudaEglColorFormatBayer20GBRG = 64, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
171
+ cudaEglColorFormatYVU444Planar = 65, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
172
+ cudaEglColorFormatYVU422Planar = 66, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
173
+ cudaEglColorFormatYVU420Planar = 67, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
174
+ cudaEglColorFormatBayerIspRGGB = 68, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */
175
+ cudaEglColorFormatBayerIspBGGR = 69, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */
176
+ cudaEglColorFormatBayerIspGRBG = 70, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */
177
+ cudaEglColorFormatBayerIspGBRG = 71, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */
178
+ cudaEglColorFormatBayerBCCR = 72, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */
179
+ cudaEglColorFormatBayerRCCB = 73, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */
180
+ cudaEglColorFormatBayerCRBC = 74, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */
181
+ cudaEglColorFormatBayerCBRC = 75, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */
182
+ cudaEglColorFormatBayer10CCCC = 76, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
183
+ cudaEglColorFormatBayer12BCCR = 77, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
184
+ cudaEglColorFormatBayer12RCCB = 78, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
185
+ cudaEglColorFormatBayer12CRBC = 79, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
186
+ cudaEglColorFormatBayer12CBRC = 80, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
187
+ cudaEglColorFormatBayer12CCCC = 81, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
188
+ cudaEglColorFormatY = 82, /**< Color format for single Y plane. */
189
+ cudaEglColorFormatYUV420SemiPlanar_2020 = 83, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
190
+ cudaEglColorFormatYVU420SemiPlanar_2020 = 84, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
191
+ cudaEglColorFormatYUV420Planar_2020 = 85, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
192
+ cudaEglColorFormatYVU420Planar_2020 = 86, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
193
+ cudaEglColorFormatYUV420SemiPlanar_709 = 87, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
194
+ cudaEglColorFormatYVU420SemiPlanar_709 = 88, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
195
+ cudaEglColorFormatYUV420Planar_709 = 89, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
196
+ cudaEglColorFormatYVU420Planar_709 = 90, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
197
+ cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
198
+ cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
199
+ cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
200
+ cudaEglColorFormatY10V10U10_422SemiPlanar = 94, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
201
+ cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
202
+ cudaEglColorFormatY_ER = 96, /**< Extended Range Color format for single Y plane. */
203
+ cudaEglColorFormatY_709_ER = 97, /**< Extended Range Color format for single Y plane. */
204
+ cudaEglColorFormatY10_ER = 98, /**< Extended Range Color format for single Y10 plane. */
205
+ cudaEglColorFormatY10_709_ER = 99, /**< Extended Range Color format for single Y10 plane. */
206
+ cudaEglColorFormatY12_ER = 100, /**< Extended Range Color format for single Y12 plane. */
207
+ cudaEglColorFormatY12_709_ER = 101, /**< Extended Range Color format for single Y12 plane. */
208
+ cudaEglColorFormatYUVA = 102, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */
209
+ cudaEglColorFormatYVYU = 104, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */
210
+ cudaEglColorFormatVYUY = 105, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */
211
+ cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
212
+ cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
213
+ cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
214
+ cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
215
+ cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
216
+ cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
217
+ cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
218
+ cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
219
+ } cudaEglColorFormat;
220
+
221
+ /**
222
+ * CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame
223
+ */
224
+ typedef struct cudaEglPlaneDesc_st {
225
+ unsigned int width; /**< Width of plane */
226
+ unsigned int height; /**< Height of plane */
227
+ unsigned int depth; /**< Depth of plane */
228
+ unsigned int pitch; /**< Pitch of plane */
229
+ unsigned int numChannels; /**< Number of channels for the plane */
230
+ struct cudaChannelFormatDesc channelDesc; /**< Channel Format Descriptor */
231
+ unsigned int reserved[4]; /**< Reserved for future use */
232
+ } cudaEglPlaneDesc;
233
+
234
+ /**
235
+ * CUDA EGLFrame Descriptor - structure defining one frame of EGL.
236
+ *
237
+ * Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.
238
+ * Each plane of EGLFrame is represented by ::cudaEglPlaneDesc which is defined as:
239
+ * \code
240
+ * typedef struct cudaEglPlaneDesc_st {
241
+ * unsigned int width;
242
+ * unsigned int height;
243
+ * unsigned int depth;
244
+ * unsigned int pitch;
245
+ * unsigned int numChannels;
246
+ * struct cudaChannelFormatDesc channelDesc;
247
+ * unsigned int reserved[4];
248
+ * } cudaEglPlaneDesc;
249
+ * \endcode
250
+
251
+ */
252
+ typedef struct cudaEglFrame_st {
253
+ union {
254
+ cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; /**< Array of CUDA arrays corresponding to each plane*/
255
+ struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/
256
+ } frame;
257
+ cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; /**< CUDA EGL Plane Descriptor ::cudaEglPlaneDesc*/
258
+ unsigned int planeCount; /**< Number of planes */
259
+ cudaEglFrameType frameType; /**< Array or Pitch */
260
+ cudaEglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/
261
+ } cudaEglFrame;
262
+
263
+ /**
264
+ * CUDA EGLStream Connection
265
+ */
266
+ typedef struct CUeglStreamConnection_st *cudaEglStreamConnection;
267
+
268
+ /** @} */ /* END CUDART_TYPES */
269
+
270
+ /**
271
+ * \addtogroup CUDART_EGL EGL Interoperability
272
+ * This section describes the EGL interoperability functions of the CUDA
273
+ * runtime application programming interface.
274
+ *
275
+ * @{
276
+ */
277
+
278
+ /**
279
+ * \brief Registers an EGL image
280
+ *
281
+ * Registers the EGLImageKHR specified by \p image for access by
282
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
283
+ * Additional Mapping/Unmapping is not required for the registered resource and
284
+ * ::cudaGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource.
285
+ *
286
+ * The application will be responsible for synchronizing access to shared objects.
287
+ * The application must ensure that any pending operations which access the objects have completed
288
+ * before passing control to CUDA. This may be accomplished by issuing and waiting for
289
+ * a glFinish command on all GL contexts (for OpenGL, and likewise for other APIs).
290
+ * The application will also be responsible for ensuring that any pending operation on the
291
+ * registered CUDA resource has completed prior to executing subsequent commands in other APIs
292
+ * accessing the same memory objects.
293
+ * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).
294
+ *
295
+ * The surface's intended usage is specified using \p flags, as follows:
296
+ *
297
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
298
+ * resource will be used. It is therefore assumed that this resource will be
299
+ * read from and written to by CUDA. This is the default value.
300
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
301
+ * will not write to this resource.
302
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
303
+ * CUDA will not read from this resource and will write over the
304
+ * entire contents of the resource, so none of the data previously
305
+ * stored in the resource will be preserved.
306
+ *
307
+ * The EGLImageKHR is an object which can be used to create an EGLImage target resource. It is defined as a void pointer.
308
+ * typedef void* EGLImageKHR
309
+ *
310
+ * \param pCudaResource - Pointer to the returned object handle
311
+ * \param image - An EGLImageKHR image which can be used to create target resource.
312
+ * \param flags - Map flags
313
+ *
314
+ * \return
315
+ * ::cudaSuccess,
316
+ * ::cudaErrorInvalidResourceHandle,
317
+ * ::cudaErrorInvalidValue,
318
+ * ::cudaErrorUnknown
319
+ *
320
+ * \sa
321
+ * ::cudaGraphicsUnregisterResource,
322
+ * ::cudaGraphicsResourceGetMappedEglFrame,
323
+ * ::cuGraphicsEGLRegisterImage
324
+ */
325
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsEGLRegisterImage(struct cudaGraphicsResource **pCudaResource, EGLImageKHR image, unsigned int flags);
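+
+ /*
+  * Editor's sketch (not part of the original header): a minimal usage outline for
+  * ::cudaGraphicsEGLRegisterImage. It assumes an EGLImageKHR named `eglImage` has
+  * already been created elsewhere (e.g. via eglCreateImageKHR); error handling is
+  * reduced to a single check.
+  * \code
+  * struct cudaGraphicsResource *res = NULL;
+  * cudaError_t err = cudaGraphicsEGLRegisterImage(&res, eglImage,
+  *                                                cudaGraphicsRegisterFlagsNone);
+  * if (err == cudaSuccess) {
+  *     cudaEglFrame frame;
+  *     // No map/unmap is needed; the frame can be queried directly.
+  *     cudaGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0);
+  *     // ... use frame.frame.pArray[0] or frame.frame.pPitch[0] ...
+  *     cudaGraphicsUnregisterResource(res);
+  * }
+  * \endcode
+  */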
326
+
327
+ /**
328
+ * \brief Connect CUDA to EGLStream as a consumer.
329
+ *
330
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream.
331
+ *
332
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
333
+ * API to another.
334
+ *
335
+ * \param conn - Pointer to the returned connection handle
336
+ * \param eglStream - EGLStreamKHR handle
337
+ *
338
+ * \return
339
+ * ::cudaSuccess,
340
+ * ::cudaErrorInvalidValue,
341
+ * ::cudaErrorUnknown
342
+ *
343
+ * \sa
344
+ * ::cudaEGLStreamConsumerDisconnect,
345
+ * ::cudaEGLStreamConsumerAcquireFrame,
346
+ * ::cudaEGLStreamConsumerReleaseFrame,
347
+ * ::cuEGLStreamConsumerConnect
348
+ */
349
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnect(cudaEglStreamConnection *conn, EGLStreamKHR eglStream);
350
+
351
+ /**
352
+ * \brief Connect CUDA to EGLStream as a consumer with given flags.
353
+ *
354
+ * Connect CUDA as a consumer to the EGLStreamKHR specified by \p eglStream, with \p flags defined by
355
+ * ::cudaEglResourceLocationFlags.
356
+ *
357
+ * The flags specify whether the consumer wants to access frames from system memory or video memory.
358
+ * Default is ::cudaEglResourceLocationVidmem.
359
+ *
360
+ * \param conn - Pointer to the returned connection handle
361
+ * \param eglStream - EGLStreamKHR handle
362
+ * \param flags - Flags denoting the intended location - system or video memory.
363
+ *
364
+ * \return
365
+ * ::cudaSuccess,
366
+ * ::cudaErrorInvalidValue,
367
+ * ::cudaErrorUnknown
368
+ *
369
+ * \sa
370
+ * ::cudaEGLStreamConsumerDisconnect,
371
+ * ::cudaEGLStreamConsumerAcquireFrame,
372
+ * ::cudaEGLStreamConsumerReleaseFrame,
373
+ * ::cuEGLStreamConsumerConnectWithFlags
374
+ */
375
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection *conn, EGLStreamKHR eglStream, unsigned int flags);
376
+
377
+ /**
378
+ * \brief Disconnect CUDA as a consumer from EGLStream.
379
+ *
380
+ * Disconnect CUDA as a consumer from EGLStreamKHR.
381
+ *
382
+ * \param conn - Connection to disconnect.
383
+ *
384
+ * \return
385
+ * ::cudaSuccess,
386
+ * ::cudaErrorInvalidValue,
387
+ * ::cudaErrorUnknown
388
+ *
389
+ * \sa
390
+ * ::cudaEGLStreamConsumerConnect,
391
+ * ::cudaEGLStreamConsumerAcquireFrame,
392
+ * ::cudaEGLStreamConsumerReleaseFrame,
393
+ * ::cuEGLStreamConsumerDisconnect
394
+ */
395
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection *conn);
396
+
397
+ /**
398
+ * \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
399
+ *
400
+ * Acquire an image frame from EGLStreamKHR.
401
+ * ::cudaGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get
402
+ * ::cudaEglFrame.
403
+ *
404
+ * \param conn - Connection on which to acquire
405
+ * \param pCudaResource - CUDA resource on which the EGLStream frame will be mapped for use.
406
+ * \param pStream - CUDA stream for synchronization and any data migrations
407
+ * implied by ::cudaEglResourceLocationFlags.
408
+ * \param timeout - Desired timeout in usec.
409
+ *
410
+ * \return
411
+ * ::cudaSuccess,
412
+ * ::cudaErrorInvalidValue,
413
+ * ::cudaErrorUnknown,
414
+ * ::cudaErrorLaunchTimeout
415
+ *
416
+ * \sa
417
+ * ::cudaEGLStreamConsumerConnect,
418
+ * ::cudaEGLStreamConsumerDisconnect,
419
+ * ::cudaEGLStreamConsumerReleaseFrame,
420
+ * ::cuEGLStreamConsumerAcquireFrame
421
+ */
422
+
423
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection *conn,
424
+ cudaGraphicsResource_t *pCudaResource, cudaStream_t *pStream, unsigned int timeout);
425
+ /**
426
+ * \brief Releases the last frame acquired from the EGLStream.
427
+ *
428
+ * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
429
+ *
430
+ * \param conn - Connection on which to release
431
+ * \param pCudaResource - CUDA resource whose corresponding frame is to be released
432
+ * \param pStream - CUDA stream on which release will be done.
433
+ *
434
+ * \return
435
+ * ::cudaSuccess,
436
+ * ::cudaErrorInvalidValue,
437
+ * ::cudaErrorUnknown
438
+ *
439
+ * \sa
440
+ * ::cudaEGLStreamConsumerConnect,
441
+ * ::cudaEGLStreamConsumerDisconnect,
442
+ * ::cudaEGLStreamConsumerAcquireFrame,
443
+ * ::cuEGLStreamConsumerReleaseFrame
444
+ */
445
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection *conn,
446
+ cudaGraphicsResource_t pCudaResource, cudaStream_t *pStream);
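+
+ /*
+  * Editor's sketch (not part of the original header): a hedged outline of a CUDA
+  * consumer loop built from the calls documented above. `eglStream` is assumed to
+  * be an already-created EGLStreamKHR with a producer attached; the 1000 usec
+  * timeout and single-frame flow are illustrative only.
+  * \code
+  * cudaEglStreamConnection conn;
+  * if (cudaEGLStreamConsumerConnect(&conn, eglStream) == cudaSuccess) {
+  *     cudaGraphicsResource_t res;
+  *     cudaStream_t stream = 0;
+  *     if (cudaEGLStreamConsumerAcquireFrame(&conn, &res, &stream, 1000) == cudaSuccess) {
+  *         cudaEglFrame frame;
+  *         cudaGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0);
+  *         // ... read/process the frame on `stream` ...
+  *         cudaEGLStreamConsumerReleaseFrame(&conn, res, &stream);
+  *     }
+  *     cudaEGLStreamConsumerDisconnect(&conn);
+  * }
+  * \endcode
+  */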
447
+
448
+ /**
449
+ * \brief Connect CUDA to EGLStream as a producer.
450
+ *
451
+ * Connect CUDA as a producer to the EGLStreamKHR specified by \p eglStream.
452
+ *
453
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
454
+ * API to another.
455
+ *
456
+ * \param conn - Pointer to the returned connection handle
457
+ * \param eglStream - EGLStreamKHR handle
458
+ * \param width - width of the image to be submitted to the stream
459
+ * \param height - height of the image to be submitted to the stream
460
+ *
461
+ * \return
462
+ * ::cudaSuccess,
463
+ * ::cudaErrorInvalidValue,
464
+ * ::cudaErrorUnknown
465
+ *
466
+ * \sa
467
+ * ::cudaEGLStreamProducerDisconnect,
468
+ * ::cudaEGLStreamProducerPresentFrame,
469
+ * ::cudaEGLStreamProducerReturnFrame,
470
+ * ::cuEGLStreamProducerConnect
471
+ */
472
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerConnect(cudaEglStreamConnection *conn,
473
+ EGLStreamKHR eglStream, EGLint width, EGLint height);
474
+
475
+ /**
476
+ * \brief Disconnect CUDA as a producer from EGLStream.
477
+ *
478
+ * Disconnect CUDA as a producer from EGLStreamKHR.
479
+ *
480
+ * \param conn - Connection to disconnect.
481
+ *
482
+ * \return
483
+ * ::cudaSuccess,
484
+ * ::cudaErrorInvalidValue,
485
+ * ::cudaErrorUnknown
486
+ *
487
+ * \sa
488
+ * ::cudaEGLStreamProducerConnect,
489
+ * ::cudaEGLStreamProducerPresentFrame,
490
+ * ::cudaEGLStreamProducerReturnFrame,
491
+ * ::cuEGLStreamProducerDisconnect
492
+ */
493
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerDisconnect(cudaEglStreamConnection *conn);
494
+
495
+ /**
496
+ * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
497
+ *
498
+ * The ::cudaEglFrame is defined as:
499
+ * \code
500
+ * typedef struct cudaEglFrame_st {
501
+ * union {
502
+ * cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
503
+ * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
504
+ * } frame;
505
+ * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
506
+ * unsigned int planeCount;
507
+ * cudaEglFrameType frameType;
508
+ * cudaEglColorFormat eglColorFormat;
509
+ * } cudaEglFrame;
510
+ * \endcode
511
+ *
512
+ * For ::cudaEglFrame of type ::cudaEglFrameTypePitch, the application may present sub-region of a memory
513
+ * allocation. In that case, ::cudaPitchedPtr::ptr will specify the start address of the sub-region in
514
+ * the allocation and ::cudaEglPlaneDesc will specify the dimensions of the sub-region.
515
+ *
516
+ * \param conn - Connection on which to present the CUDA array
517
+ * \param eglframe - CUDA EGLStream Producer Frame handle to be sent to the consumer over the EGLStream.
518
+ * \param pStream - CUDA stream on which to present the frame.
519
+ *
520
+ * \return
521
+ * ::cudaSuccess,
522
+ * ::cudaErrorInvalidValue,
523
+ * ::cudaErrorUnknown
524
+ *
525
+ * \sa
526
+ * ::cudaEGLStreamProducerConnect,
527
+ * ::cudaEGLStreamProducerDisconnect,
528
+ * ::cudaEGLStreamProducerReturnFrame,
529
+ * ::cuEGLStreamProducerPresentFrame
530
+ */
531
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection *conn,
532
+ cudaEglFrame eglframe, cudaStream_t *pStream);
533
+
534
+ /**
535
+ * \brief Return the CUDA eglFrame to the EGLStream last released by the consumer.
536
+ *
537
+ * This API can potentially return cudaErrorLaunchTimeout if the consumer has not
538
+ * returned a frame to the EGL stream. If a timeout is returned, the application can retry.
539
+ *
540
+ * \param conn - Connection on which to present the CUDA array
541
+ * \param eglframe - CUDA EGLStream Producer Frame handle returned from the consumer over the EGLStream.
542
+ * \param pStream - CUDA stream on which to return the frame.
543
+ *
544
+ * \return
545
+ * ::cudaSuccess,
546
+ * ::cudaErrorLaunchTimeout,
547
+ * ::cudaErrorInvalidValue,
548
+ * ::cudaErrorUnknown
549
+ *
550
+ * \sa
551
+ * ::cudaEGLStreamProducerConnect,
552
+ * ::cudaEGLStreamProducerDisconnect,
553
+ * ::cudaEGLStreamProducerPresentFrame,
554
+ * ::cuEGLStreamProducerReturnFrame
555
+ */
556
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection *conn,
557
+ cudaEglFrame *eglframe, cudaStream_t *pStream);
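+
+ /*
+  * Editor's sketch (not part of the original header): a hedged producer-side outline
+  * using the calls above. It assumes `eglStream` is an EGLStreamKHR with a connected
+  * consumer, and that `devPtr`/`pitch` come from a prior cudaMallocPitch of a single
+  * WIDTH x HEIGHT 8-bit luminance plane; names, sizes and the color format are
+  * illustrative only.
+  * \code
+  * cudaEglStreamConnection conn;
+  * cudaEGLStreamProducerConnect(&conn, eglStream, WIDTH, HEIGHT);
+  *
+  * cudaEglFrame frame;
+  * memset(&frame, 0, sizeof(frame));
+  * frame.frameType       = cudaEglFrameTypePitch;
+  * frame.eglColorFormat  = cudaEglColorFormatY;
+  * frame.planeCount      = 1;
+  * frame.frame.pPitch[0] = make_cudaPitchedPtr(devPtr, pitch, WIDTH, HEIGHT);
+  * frame.planeDesc[0].width       = WIDTH;
+  * frame.planeDesc[0].height      = HEIGHT;
+  * frame.planeDesc[0].depth       = 1;
+  * frame.planeDesc[0].pitch       = (unsigned int)pitch;
+  * frame.planeDesc[0].numChannels = 1;
+  * frame.planeDesc[0].channelDesc =
+  *     cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
+  *
+  * cudaStream_t stream = 0;
+  * cudaEGLStreamProducerPresentFrame(&conn, frame, &stream);
+  * // Later, once the consumer has released it, take the frame back for reuse:
+  * cudaEGLStreamProducerReturnFrame(&conn, &frame, &stream);
+  * cudaEGLStreamProducerDisconnect(&conn);
+  * \endcode
+  */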
558
+
559
+ /**
560
+ * \brief Get an eglFrame through which to access a registered EGL graphics resource.
561
+ *
562
+ * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
563
+ * \p resource may be accessed.
564
+ * This API can only be called for EGL graphics resources.
565
+ *
566
+ * The ::cudaEglFrame is defined as
567
+ * \code
568
+ * typedef struct cudaEglFrame_st {
569
+ * union {
570
+ * cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
571
+ * struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
572
+ * } frame;
573
+ * cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
574
+ * unsigned int planeCount;
575
+ * cudaEglFrameType frameType;
576
+ * cudaEglColorFormat eglColorFormat;
577
+ * } cudaEglFrame;
578
+ * \endcode
579
+ *
580
+ *
581
+ * \param eglFrame - Returned eglFrame.
582
+ * \param resource - Registered resource to access.
583
+ * \param index - Index for cubemap surfaces.
584
+ * \param mipLevel - Mipmap level for the subresource to access.
585
+ *
586
+ * \return
587
+ * ::cudaSuccess,
588
+ * ::cudaErrorInvalidValue,
589
+ * ::cudaErrorUnknown
590
+ *
591
+ * \note Note that in the case of a multiplanar \p *eglFrame, only the pitch of the first plane (unsigned int cudaEglPlaneDesc::pitch) should be considered by the application.
592
+ *
593
+ * \sa
594
+ * ::cudaGraphicsSubResourceGetMappedArray,
595
+ * ::cudaGraphicsResourceGetMappedPointer,
596
+ * ::cuGraphicsResourceGetMappedEglFrame
597
+ */
598
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame,
599
+ cudaGraphicsResource_t resource, unsigned int index, unsigned int mipLevel);
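+
+ /*
+  * Editor's sketch (not part of the original header): iterating over the planes of a
+  * mapped frame. `res` is assumed to be a resource previously registered with
+  * ::cudaGraphicsEGLRegisterImage; the per-plane handling is left as a placeholder.
+  * \code
+  * cudaEglFrame frame;
+  * if (cudaGraphicsResourceGetMappedEglFrame(&frame, res, 0, 0) == cudaSuccess) {
+  *     for (unsigned int p = 0; p < frame.planeCount; ++p) {
+  *         if (frame.frameType == cudaEglFrameTypeArray) {
+  *             cudaArray_t plane = frame.frame.pArray[p];
+  *             // ... bind `plane` to a surface or texture object ...
+  *         } else {
+  *             struct cudaPitchedPtr plane = frame.frame.pPitch[p];
+  *             // ... access plane.ptr using plane.pitch (see the note above on pitch) ...
+  *         }
+  *     }
+  * }
+  * \endcode
+  */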
600
+
601
+ /**
602
+ * \brief Creates an event from an EGLSync object
603
+ *
604
+ * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
605
+ * via \p flags. Valid flags include:
606
+ * - ::cudaEventDefault: Default event creation flag.
607
+ * - ::cudaEventBlockingSync: Specifies that the created event should use blocking
608
+ * synchronization. A CPU thread that uses ::cudaEventSynchronize() to wait on
609
+ * an event created with this flag will block until the event has actually
610
+ * been completed.
611
+ *
612
+ * ::cudaEventRecord and timing data are not supported for events created from EGLSync.
613
+ *
614
+ * The EGLSyncKHR is an opaque handle to an EGL sync object.
615
+ * typedef void* EGLSyncKHR
616
+ *
617
+ * \param phEvent - Returns newly created event
618
+ * \param eglSync - Opaque handle to EGLSync object
619
+ * \param flags - Event creation flags
620
+ *
621
+ * \return
622
+ * ::cudaSuccess,
623
+ * ::cudaErrorInitializationError,
624
+ * ::cudaErrorInvalidValue,
625
+ * ::cudaErrorLaunchFailure,
626
+ * ::cudaErrorMemoryAllocation
627
+ *
628
+ * \sa
629
+ * ::cudaEventQuery,
630
+ * ::cudaEventSynchronize,
631
+ * ::cudaEventDestroy
632
+ */
633
+ extern __host__ cudaError_t CUDARTAPI cudaEventCreateFromEGLSync(cudaEvent_t *phEvent, EGLSyncKHR eglSync, unsigned int flags);
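+
+ /*
+  * Editor's sketch (not part of the original header): waiting on an EGL sync object
+  * from CUDA. `eglSync` is assumed to have been created elsewhere (for example via
+  * eglCreateSyncKHR); since ::cudaEventRecord is not supported for such events, the
+  * event is only synchronized on and then destroyed.
+  * \code
+  * cudaEvent_t ev;
+  * if (cudaEventCreateFromEGLSync(&ev, eglSync, cudaEventDefault) == cudaSuccess) {
+  *     cudaEventSynchronize(ev);   // blocks until the EGL sync object signals
+  *     cudaEventDestroy(ev);
+  * }
+  * \endcode
+  */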
634
+
635
+ /** @} */ /* END CUDART_EGL */
636
+
637
+ #if defined(__cplusplus)
638
+ }
639
+ #endif /* __cplusplus */
640
+
641
+ #endif /* __CUDA_EGL_INTEROP_H__ */
642
+
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp16.hpp ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.h ADDED
@@ -0,0 +1,367 @@
1
+ /*
2
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef __CUDA_FP8_H__
51
+ #define __CUDA_FP8_H__
52
+
53
+ /* Set up function decorations */
54
+ #if defined(__CUDACC__)
55
+ #define __CUDA_FP8_DECL__ static __device__ __inline__
56
+ #define __CUDA_HOSTDEVICE_FP8__ __host__ __device__
57
+ #define __CUDA_HOSTDEVICE_FP8_DECL__ static __host__ __device__ __inline__
58
+ #else /* !defined(__CUDACC__) */
59
+ #if defined(__GNUC__)
60
+ #define __CUDA_HOSTDEVICE_FP8_DECL__ static __attribute__((unused))
61
+ #else
62
+ #define __CUDA_HOSTDEVICE_FP8_DECL__ static
63
+ #endif /* defined(__GNUC__) */
64
+ #define __CUDA_HOSTDEVICE_FP8__
65
+ #endif /* defined(__CUDACC__) */
66
+
67
+ #if !defined(_MSC_VER) && __cplusplus >= 201103L
68
+ #define __CPP_VERSION_AT_LEAST_11_FP8
69
+ #elif _MSC_FULL_VER >= 190024210 && _MSVC_LANG >= 201103L
70
+ #define __CPP_VERSION_AT_LEAST_11_FP8
71
+ #endif
72
+
73
+ /* bring in __half_raw data type */
74
+ #include "cuda_fp16.h"
75
+ /* bring in __nv_bfloat16_raw data type */
76
+ #include "cuda_bf16.h"
77
+ /* bring in float2, double4, etc vector types */
78
+ #include "vector_types.h"
79
+
80
+ /**
81
+ * \defgroup CUDA_MATH_INTRINSIC_FP8 FP8 Intrinsics
82
+ * This section describes fp8 intrinsic functions.
83
+ * To use these functions, include the header file \p cuda_fp8.h in your
84
+ * program.
85
+ * The following macros are available to help users selectively enable/disable
86
+ * various definitions present in the header file:
87
+ * - \p __CUDA_NO_FP8_CONVERSIONS__ - If defined, this macro will prevent any
88
+ * use of the C++ type conversions (converting constructors and conversion
89
+ * operators) defined in the header.
90
+ * - \p __CUDA_NO_FP8_CONVERSION_OPERATORS__ - If defined, this macro will
91
+ * prevent any use of the C++ conversion operators from \p fp8 to other types.
92
+ */
93
+
94
+ /**
95
+ * \defgroup CUDA_MATH_FP8_MISC FP8 Conversion and Data Movement
96
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
97
+ * To use these functions, include the header file \p cuda_fp8.h in your
98
+ * program.
99
+ */
100
+
101
+ /**
102
+ * \ingroup CUDA_MATH_FP8_MISC
103
+ * \brief 8-bit \p unsigned \p integer
104
+ * type abstraction used for storage of \p fp8 floating-point
105
+ * numbers.
106
+ */
107
+ typedef unsigned char __nv_fp8_storage_t;
108
+
109
+ /**
110
+ * \ingroup CUDA_MATH_FP8_MISC
111
+ * \brief 16-bit \p unsigned \p integer
112
+ * type abstraction used for storage of pairs of
113
+ * \p fp8 floating-point numbers.
114
+ */
115
+ typedef unsigned short int __nv_fp8x2_storage_t;
116
+
117
+ /**
118
+ * \ingroup CUDA_MATH_FP8_MISC
119
+ * \brief 32-bit \p unsigned \p integer
120
+ * type abstraction used for storage of tetrads of
121
+ * \p fp8 floating-point numbers.
122
+ */
123
+ typedef unsigned int __nv_fp8x4_storage_t;
124
+
125
+ /**
126
+ * \ingroup CUDA_MATH_FP8_MISC
127
+ * \brief Enumerates the modes applicable when
128
+ * performing a narrowing conversion to \p fp8 destination types.
129
+ */
130
+ typedef enum __nv_saturation_t {
131
+ /**
132
+ * Means no saturation to finite is performed when conversion
133
+ * results in rounding values outside the range of destination
134
+ * type.
135
+ * NOTE: for fp8 type of e4m3 kind, the results that are larger
136
+ * than the maximum representable finite number of the target
137
+ * format become NaN.
138
+ */
139
+ __NV_NOSAT,
140
+ /**
141
+ * Means inputs larger than the maximum representable
142
+ * finite number MAXNORM of the target format round to the
143
+ * MAXNORM of the same sign as the input.
144
+ */
145
+ __NV_SATFINITE,
146
+ } __nv_saturation_t;
147
+
148
+ /**
149
+ * \ingroup CUDA_MATH_FP8_MISC
150
+ * \brief Enumerates the possible
151
+ * interpretations of the 8-bit values when referring to them as
152
+ * \p fp8 types.
153
+ */
154
+ typedef enum __nv_fp8_interpretation_t {
155
+ __NV_E4M3, /**< Stands for \p fp8 numbers of \p e4m3 kind. */
156
+ __NV_E5M2, /**< Stands for \p fp8 numbers of \p e5m2 kind. */
157
+ } __nv_fp8_interpretation_t;
158
+
159
+ /* Forward-declaration of C-style APIs */
160
+
161
+ /**
162
+ * \ingroup CUDA_MATH_FP8_MISC
163
+ * \brief Converts input \p double precision \p x to \p fp8 type of the
164
+ * requested kind using round-to-nearest-even rounding and requested saturation
165
+ * mode.
166
+ *
167
+ * \details Converts input \p x to \p fp8 type of the kind specified by
168
+ * \p fp8_interpretation parameter,
169
+ * using round-to-nearest-even rounding and
170
+ * saturation mode specified by \p saturate parameter.
171
+ *
172
+ * \returns
173
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
174
+ */
175
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
176
+ __nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
177
+ const __nv_fp8_interpretation_t fp8_interpretation);
178
+
179
+ /**
180
+ * \ingroup CUDA_MATH_FP8_MISC
181
+ * \brief Converts input vector of two \p double precision numbers packed
182
+ * in \p double2 \p x into a vector of two values of \p fp8 type of
183
+ * the requested kind using round-to-nearest-even rounding and requested
184
+ * saturation mode.
185
+ *
186
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
187
+ * kind specified by \p fp8_interpretation parameter, using
188
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
189
+ * parameter.
190
+ *
191
+ * \returns
192
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
193
+ */
194
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
195
+ __nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate,
196
+ const __nv_fp8_interpretation_t fp8_interpretation);
197
+
198
+ /**
199
+ * \ingroup CUDA_MATH_FP8_MISC
200
+ * \brief Converts input \p single precision \p x to \p fp8 type of the
201
+ * requested kind using round-to-nearest-even rounding and requested saturation
202
+ * mode.
203
+ *
204
+ * \details Converts input \p x to \p fp8 type of the kind specified by
205
+ * \p fp8_interpretation parameter,
206
+ * using round-to-nearest-even rounding and
207
+ * saturation mode specified by \p saturate parameter.
208
+ *
209
+ * \returns
210
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
211
+ */
212
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
213
+ __nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate,
214
+ const __nv_fp8_interpretation_t fp8_interpretation);
215
+
216
+ /**
217
+ * \ingroup CUDA_MATH_FP8_MISC
218
+ * \brief Converts input vector of two \p single precision numbers packed
219
+ * in \p float2 \p x into a vector of two values of \p fp8 type of
220
+ * the requested kind using round-to-nearest-even rounding and requested
221
+ * saturation mode.
222
+ *
223
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
224
+ * kind specified by \p fp8_interpretation parameter, using
225
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
226
+ * parameter.
227
+ *
228
+ * \returns
229
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
230
+ */
231
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
232
+ __nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate,
233
+ const __nv_fp8_interpretation_t fp8_interpretation);
234
+
235
+ /**
236
+ * \ingroup CUDA_MATH_FP8_MISC
237
+ * \brief Converts input \p half precision \p x to \p fp8 type of the requested
238
+ * kind using round-to-nearest-even rounding and requested saturation mode.
239
+ *
240
+ * \details Converts input \p x to \p fp8 type of the kind specified by
241
+ * \p fp8_interpretation parameter,
242
+ * using round-to-nearest-even rounding and
243
+ * saturation mode specified by \p saturate parameter.
244
+ *
245
+ * \returns
246
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
247
+ */
248
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
249
+ __nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate,
250
+ const __nv_fp8_interpretation_t fp8_interpretation);
251
+
252
+ /**
253
+ * \ingroup CUDA_MATH_FP8_MISC
254
+ * \brief Converts input vector of two \p half precision numbers packed
255
+ * in \p __half2_raw \p x into a vector of two values of \p fp8 type of
256
+ * the requested kind using round-to-nearest-even rounding and requested
257
+ * saturation mode.
258
+ *
259
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
260
+ * kind specified by \p fp8_interpretation parameter, using
261
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
262
+ * parameter.
263
+ *
264
+ * \returns
265
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
266
+ */
267
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2(
268
+ const __half2_raw x, const __nv_saturation_t saturate,
269
+ const __nv_fp8_interpretation_t fp8_interpretation);
270
+
271
+ /**
272
+ * \ingroup CUDA_MATH_FP8_MISC
273
+ * \brief Converts input \p nv_bfloat16 precision \p x to \p fp8 type of the
274
+ * requested kind using round-to-nearest-even rounding and requested saturation
275
+ * mode.
276
+ *
277
+ * \details Converts input \p x to \p fp8 type of the kind specified by
278
+ * \p fp8_interpretation parameter,
279
+ * using round-to-nearest-even rounding and
280
+ * saturation mode specified by \p saturate parameter.
281
+ *
282
+ * \returns
283
+ * - The \p __nv_fp8_storage_t value holds the result of conversion.
284
+ */
285
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8(
286
+ const __nv_bfloat16_raw x, const __nv_saturation_t saturate,
287
+ const __nv_fp8_interpretation_t fp8_interpretation);
288
+
289
+ /**
290
+ * \ingroup CUDA_MATH_FP8_MISC
291
+ * \brief Converts input vector of two \p nv_bfloat16 precision numbers packed
292
+ * in \p __nv_bfloat162_raw \p x into a vector of two values of \p fp8 type of
293
+ * the requested kind using round-to-nearest-even rounding and requested
294
+ * saturation mode.
295
+ *
296
+ * \details Converts input vector \p x to a vector of two \p fp8 values of the
297
+ * kind specified by \p fp8_interpretation parameter, using
298
+ * round-to-nearest-even rounding and saturation mode specified by \p saturate
299
+ * parameter.
300
+ *
301
+ * \returns
302
+ * - The \p __nv_fp8x2_storage_t value holds the result of conversion.
303
+ */
304
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
305
+ __nv_cvt_bfloat16raw2_to_fp8x2(
306
+ const __nv_bfloat162_raw x, const __nv_saturation_t saturate,
307
+ const __nv_fp8_interpretation_t fp8_interpretation);
308
+
309
+ /**
310
+ * \ingroup CUDA_MATH_FP8_MISC
311
+ * \brief Converts input \p fp8 \p x of the specified kind
312
+ * to \p half precision.
313
+ *
314
+ * \details Converts input \p x of \p fp8 type of the kind specified by
315
+ * \p fp8_interpretation parameter
316
+ * to \p half precision.
317
+ *
318
+ * \returns
319
+ * - The \p __half_raw value holds the result of conversion.
320
+ */
321
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half_raw
322
+ __nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x,
323
+ const __nv_fp8_interpretation_t fp8_interpretation);
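+
+ /*
+  * Editor's sketch (not part of the original header): a host-side round trip through
+  * the C-style conversion helpers declared above, using the e4m3 interpretation with
+  * saturation to finite. It assumes the host implementations pulled in by cuda_fp8.h;
+  * printing the raw bit patterns keeps the example free of further conversion helpers.
+  * \code
+  * #include <cstdio>
+  * #include "cuda_fp8.h"
+  *
+  * int main() {
+  *     __nv_fp8_storage_t f8 =
+  *         __nv_cvt_float_to_fp8(0.8f, __NV_SATFINITE, __NV_E4M3);
+  *     __half_raw hr = __nv_cvt_fp8_to_halfraw(f8, __NV_E4M3);
+  *     std::printf("fp8 bits: 0x%02x, half bits: 0x%04x\n",
+  *                 (unsigned)f8, (unsigned)hr.x);
+  *     return 0;
+  * }
+  * \endcode
+  */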
324
+ /**
325
+ * \ingroup CUDA_MATH_FP8_MISC
326
+ * \brief Converts input vector of two \p fp8 values of the specified kind
327
+ * to a vector of two \p half precision values packed in \p __half2_raw
328
+ * structure.
329
+ *
330
+ * \details Converts input vector \p x of \p fp8 type of the kind specified by
331
+ * \p fp8_interpretation parameter
332
+ * to a vector of two \p half precision values and returns as \p __half2_raw
333
+ * structure.
334
+ *
335
+ * \returns
336
+ * - The \p __half2_raw value holds the result of conversion.
337
+ */
338
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
339
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
340
+ const __nv_fp8_interpretation_t fp8_interpretation);
341
+
342
+ #if defined(__cplusplus)
343
+
344
+ #define __CUDA_FP8_TYPES_EXIST__
345
+
346
+ /* Forward-declaration of structures defined in "cuda_fp8.hpp" */
347
+ struct __nv_fp8_e5m2;
348
+ struct __nv_fp8x2_e5m2;
349
+ struct __nv_fp8x4_e5m2;
350
+
351
+ struct __nv_fp8_e4m3;
352
+ struct __nv_fp8x2_e4m3;
353
+ struct __nv_fp8x4_e4m3;
354
+
355
+ #endif /* defined(__cplusplus) */
356
+
357
+ #include "cuda_fp8.hpp"
358
+
359
+ #undef __CUDA_FP8_DECL__
360
+ #undef __CUDA_HOSTDEVICE_FP8__
361
+ #undef __CUDA_HOSTDEVICE_FP8_DECL__
362
+
363
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
364
+ #undef __CPP_VERSION_AT_LEAST_11_FP8
365
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
366
+
367
+ #endif /* end of include guard: __CUDA_FP8_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h ADDED
@@ -0,0 +1,514 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_GL_INTEROP_H__)
51
+ #define __CUDA_GL_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+
55
+ #if defined(__APPLE__)
56
+
57
+ #include <OpenGL/gl.h>
58
+
59
+ #else /* __APPLE__ */
60
+
61
+ #if defined(__arm__) || defined(__aarch64__)
62
+ #ifndef GL_VERSION
63
+ #error Please include the appropriate gl headers before including cuda_gl_interop.h
64
+ #endif
65
+ #else
66
+ #include <GL/gl.h>
67
+ #endif
68
+
69
+ #endif /* __APPLE__ */
70
+
71
+ /** \cond impl_private */
72
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
73
+ #define __CUDA_DEPRECATED
74
+ #elif defined(_MSC_VER)
75
+ #define __CUDA_DEPRECATED __declspec(deprecated)
76
+ #elif defined(__GNUC__)
77
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
78
+ #else
79
+ #define __CUDA_DEPRECATED
80
+ #endif
81
+ /** \endcond impl_private */
82
+
83
+ #if defined(__cplusplus)
84
+ extern "C" {
85
+ #endif /* __cplusplus */
86
+
87
+ /**
88
+ * \addtogroup CUDART_OPENGL OpenGL Interoperability
89
+ * This section describes the OpenGL interoperability functions of the CUDA
90
+ * runtime application programming interface. Note that mapping of OpenGL
91
+ * resources is performed with the graphics API agnostic, resource mapping
92
+ * interface described in \ref CUDART_INTEROP "Graphics Interoperability".
93
+ *
94
+ * @{
95
+ */
96
+
97
+ /**
98
+ * CUDA devices corresponding to the current OpenGL context
99
+ */
100
+ enum cudaGLDeviceList
101
+ {
102
+ cudaGLDeviceListAll = 1, /**< The CUDA devices for all GPUs used by the current OpenGL context */
103
+ cudaGLDeviceListCurrentFrame = 2, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
104
+ cudaGLDeviceListNextFrame = 3 /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
105
+ };
106
+
107
+ /**
108
+ * \brief Gets the CUDA devices associated with the current OpenGL context
109
+ *
110
+ * Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
111
+ * corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
112
+ * at most \p cudaDeviceCount of the CUDA-compatible devices corresponding to
113
+ * the current OpenGL context. If any of the GPUs being used by the current OpenGL
114
+ * context are not CUDA capable then the call will return ::cudaErrorNoDevice.
115
+ *
116
+ * \param pCudaDeviceCount - Returned number of CUDA devices corresponding to the
117
+ * current OpenGL context
118
+ * \param pCudaDevices - Returned CUDA devices corresponding to the current
119
+ * OpenGL context
120
+ * \param cudaDeviceCount - The size of the output device array \p pCudaDevices
121
+ * \param deviceList - The set of devices to return. This set may be
122
+ * ::cudaGLDeviceListAll for all devices,
123
+ * ::cudaGLDeviceListCurrentFrame for the devices used to
124
+ * render the current frame (in SLI), or
125
+ * ::cudaGLDeviceListNextFrame for the devices used to
126
+ * render the next frame (in SLI).
127
+ *
128
+ * \return
129
+ * ::cudaSuccess,
130
+ * ::cudaErrorNoDevice,
131
+ * ::cudaErrorInvalidGraphicsContext,
132
+ * ::cudaErrorOperatingSystem,
133
+ * ::cudaErrorUnknown
134
+ *
135
+ * \note This function is not supported on Mac OS X.
136
+ * \notefnerr
137
+ *
138
+ * \sa
139
+ * ::cudaGraphicsUnregisterResource,
140
+ * ::cudaGraphicsMapResources,
141
+ * ::cudaGraphicsSubResourceGetMappedArray,
142
+ * ::cudaGraphicsResourceGetMappedPointer,
143
+ * ::cuGLGetDevices
144
+ */
145
+ extern __host__ cudaError_t CUDARTAPI cudaGLGetDevices(unsigned int *pCudaDeviceCount, int *pCudaDevices, unsigned int cudaDeviceCount, enum cudaGLDeviceList deviceList);
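+
+ /*
+  * Editor's sketch (not part of the original header): selecting a CUDA device that
+  * backs the current OpenGL context. It assumes a GL context is already current on
+  * this thread; only the first reported device is used here.
+  * \code
+  * unsigned int count = 0;
+  * int devices[8];
+  * if (cudaGLGetDevices(&count, devices, 8, cudaGLDeviceListAll) == cudaSuccess
+  *     && count > 0) {
+  *     cudaSetDevice(devices[0]);
+  * }
+  * \endcode
+  */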
146
+
147
+ /**
148
+ * \brief Register an OpenGL texture or renderbuffer object
149
+ *
150
+ * Registers the texture or renderbuffer object specified by \p image for access by CUDA.
151
+ * A handle to the registered object is returned as \p resource.
152
+ *
153
+ * \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
154
+ * ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
155
+ * or ::GL_RENDERBUFFER.
156
+ *
157
+ * The register flags \p flags specify the intended usage, as follows:
158
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
159
+ * resource will be used. It is therefore assumed that this resource will be
160
+ * read from and written to by CUDA. This is the default value.
161
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
162
+ * will not write to this resource.
163
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
164
+ * CUDA will not read from this resource and will write over the
165
+ * entire contents of the resource, so none of the data previously
166
+ * stored in the resource will be preserved.
167
+ * - ::cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will
168
+ * bind this resource to a surface reference.
169
+ * - ::cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform
170
+ * texture gather operations on this resource.
171
+ *
172
+ * The following image formats are supported. For brevity's sake, the list is abbreviated.
173
+ * For example, {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
174
+ * {GL_R8, GL_R16, GL_RG8, GL_RG16} :
175
+ * - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
176
+ * - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
177
+ * - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
178
+ * {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
179
+ *
180
+ * The following image classes are currently disallowed:
181
+ * - Textures with borders
182
+ * - Multisampled renderbuffers
183
+ *
184
+ * \param resource - Pointer to the returned object handle
185
+ * \param image - name of texture or renderbuffer object to be registered
186
+ * \param target - Identifies the type of object specified by \p image
187
+ * \param flags - Register flags
188
+ *
189
+ * \return
190
+ * ::cudaSuccess,
191
+ * ::cudaErrorInvalidDevice,
192
+ * ::cudaErrorInvalidValue,
193
+ * ::cudaErrorInvalidResourceHandle,
194
+ * ::cudaErrorOperatingSystem,
195
+ * ::cudaErrorUnknown
196
+ * \notefnerr
197
+ *
198
+ * \sa
199
+ * ::cudaGraphicsUnregisterResource,
200
+ * ::cudaGraphicsMapResources,
201
+ * ::cudaGraphicsSubResourceGetMappedArray,
202
+ * ::cuGraphicsGLRegisterImage
203
+ */
204
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterImage(struct cudaGraphicsResource **resource, GLuint image, GLenum target, unsigned int flags);
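+
+ /*
+  * Editor's sketch (not part of the original header): registering a GL texture and
+  * obtaining its CUDA array. `tex` is assumed to be a complete GL_TEXTURE_2D created
+  * beforehand; the array would typically be wrapped in a surface or texture object.
+  * \code
+  * struct cudaGraphicsResource *res = NULL;
+  * cudaGraphicsGLRegisterImage(&res, tex, GL_TEXTURE_2D,
+  *                             cudaGraphicsRegisterFlagsSurfaceLoadStore);
+  * cudaGraphicsMapResources(1, &res, 0);
+  * cudaArray_t array;
+  * cudaGraphicsSubResourceGetMappedArray(&array, res, 0, 0);
+  * // ... read/write `array` from CUDA ...
+  * cudaGraphicsUnmapResources(1, &res, 0);
+  * cudaGraphicsUnregisterResource(res);
+  * \endcode
+  */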
205
+
206
+ /**
207
+ * \brief Registers an OpenGL buffer object
208
+ *
209
+ * Registers the buffer object specified by \p buffer for access by
210
+ * CUDA. A handle to the registered object is returned as \p
211
+ * resource. The register flags \p flags specify the intended usage,
212
+ * as follows:
213
+ *
214
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
215
+ * resource will be used. It is therefore assumed that this resource will be
216
+ * read from and written to by CUDA. This is the default value.
217
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
218
+ * will not write to this resource.
219
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
220
+ * CUDA will not read from this resource and will write over the
221
+ * entire contents of the resource, so none of the data previously
222
+ * stored in the resource will be preserved.
223
+ *
224
+ * \param resource - Pointer to the returned object handle
225
+ * \param buffer - name of buffer object to be registered
226
+ * \param flags - Register flags
227
+ *
228
+ * \return
229
+ * ::cudaSuccess,
230
+ * ::cudaErrorInvalidDevice,
231
+ * ::cudaErrorInvalidValue,
232
+ * ::cudaErrorInvalidResourceHandle,
233
+ * ::cudaErrorOperatingSystem,
234
+ * ::cudaErrorUnknown
235
+ * \notefnerr
236
+ *
237
+ * \sa
238
+ * ::cudaGraphicsUnregisterResource,
239
+ * ::cudaGraphicsMapResources,
240
+ * ::cudaGraphicsResourceGetMappedPointer,
241
+ * ::cuGraphicsGLRegisterBuffer
242
+ */
243
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterBuffer(struct cudaGraphicsResource **resource, GLuint buffer, unsigned int flags);
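+
+ /*
+  * Editor's sketch (not part of the original header): registering a GL buffer object
+  * (e.g. a PBO named `pbo`) and writing to it from CUDA. The cudaMemset stands in for
+  * a kernel launch; `size` is returned by the map call.
+  * \code
+  * struct cudaGraphicsResource *res = NULL;
+  * cudaGraphicsGLRegisterBuffer(&res, pbo, cudaGraphicsRegisterFlagsWriteDiscard);
+  * cudaGraphicsMapResources(1, &res, 0);
+  * void  *devPtr = NULL;
+  * size_t size   = 0;
+  * cudaGraphicsResourceGetMappedPointer(&devPtr, &size, res);
+  * cudaMemset(devPtr, 0, size);          // placeholder for real device work
+  * cudaGraphicsUnmapResources(1, &res, 0);
+  * cudaGraphicsUnregisterResource(res);
+  * \endcode
+  */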
244
+
245
+ #ifdef _WIN32
246
+ #ifndef WGL_NV_gpu_affinity
247
+ typedef void* HGPUNV;
248
+ #endif
249
+
250
+ /**
251
+ * \brief Gets the CUDA device associated with hGpu
252
+ *
253
+ * Returns the CUDA device associated with a hGpu, if applicable.
254
+ *
255
+ * \param device - Returns the device associated with hGpu, or -1 if hGpu is
256
+ * not a compute device.
257
+ * \param hGpu - Handle to a GPU, as queried via WGL_NV_gpu_affinity
258
+ *
259
+ * \return
260
+ * ::cudaSuccess
261
+ * \notefnerr
262
+ *
263
+ * \sa
264
+ * ::WGL_NV_gpu_affinity,
265
+ * ::cuWGLGetDevice
266
+ */
267
+ extern __host__ cudaError_t CUDARTAPI cudaWGLGetDevice(int *device, HGPUNV hGpu);
268
+ #endif
269
+
270
+ /** @} */ /* END CUDART_OPENGL */
271
+
272
+ /**
273
+ * \addtogroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED]
274
+ * This section describes deprecated OpenGL interoperability functionality.
275
+ *
276
+ * @{
277
+ */
278
+
279
+ /**
280
+ * CUDA GL Map Flags
281
+ */
282
+ enum cudaGLMapFlags
283
+ {
284
+ cudaGLMapFlagsNone = 0, /**< Default; Assume resource can be read/written */
285
+ cudaGLMapFlagsReadOnly = 1, /**< CUDA kernels will not write to this resource */
286
+ cudaGLMapFlagsWriteDiscard = 2 /**< CUDA kernels will only write to and will not read from this resource */
287
+ };
288
+
289
+ /**
290
+ * \brief Sets a CUDA device to use OpenGL interoperability
291
+ *
292
+ * \deprecated This function is deprecated as of CUDA 5.0.
293
+ *
294
+ * This function is deprecated and should no longer be used. It is
295
+ * no longer necessary to associate a CUDA device with an OpenGL
296
+ * context in order to achieve maximum interoperability performance.
297
+ *
298
+ * This function will immediately initialize the primary context on
299
+ * \p device if needed.
300
+ *
301
+ * \param device - Device to use for OpenGL interoperability
302
+ *
303
+ * \return
304
+ * ::cudaSuccess,
305
+ * ::cudaErrorInvalidDevice,
306
+ * ::cudaErrorSetOnActiveProcess
307
+ * \notefnerr
308
+ *
309
+ * \sa ::cudaGraphicsGLRegisterBuffer, ::cudaGraphicsGLRegisterImage
310
+ */
311
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetGLDevice(int device);
312
+
313
+ /**
314
+ * \brief Registers a buffer object for access by CUDA
315
+ *
316
+ * \deprecated This function is deprecated as of CUDA 3.0.
317
+ *
318
+ * Registers the buffer object of ID \p bufObj for access by
319
+ * CUDA. This function must be called before CUDA can map the buffer
320
+ * object. The OpenGL context used to create the buffer, or another
321
+ * context from the same share group, must be bound to the current
322
+ * thread when this is called.
323
+ *
324
+ * \param bufObj - Buffer object ID to register
325
+ *
326
+ * \return
327
+ * ::cudaSuccess,
328
+ * ::cudaErrorInitializationError
329
+ * \notefnerr
330
+ *
331
+ * \sa ::cudaGraphicsGLRegisterBuffer
332
+ */
333
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLRegisterBufferObject(GLuint bufObj);
334
+
335
+ /**
336
+ * \brief Maps a buffer object for access by CUDA
337
+ *
338
+ * \deprecated This function is deprecated as of CUDA 3.0.
339
+ *
340
+ * Maps the buffer object of ID \p bufObj into the address space of
341
+ * CUDA and returns in \p *devPtr the base pointer of the resulting
342
+ * mapping. The buffer must have previously been registered by
343
+ * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
344
+ * by CUDA, any OpenGL operation which references the buffer will
345
+ * result in undefined behavior. The OpenGL context used to create
346
+ * the buffer, or another context from the same share group, must be
347
+ * bound to the current thread when this is called.
348
+ *
349
+ * All streams in the current thread are synchronized with the current
350
+ * GL context.
351
+ *
352
+ * \param devPtr - Returned device pointer to CUDA object
353
+ * \param bufObj - Buffer object ID to map
354
+ *
355
+ * \return
356
+ * ::cudaSuccess,
357
+ * ::cudaErrorMapBufferObjectFailed
358
+ * \notefnerr
359
+ *
360
+ * \sa ::cudaGraphicsMapResources
361
+ */
362
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObject(void **devPtr, GLuint bufObj);
363
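For readers maintaining legacy code, a minimal sketch of the deprecated flow documented above (new code should use the cudaGraphics* resource API instead); `vbo` is an illustrative GL buffer id and error checking is omitted.

    // Deprecated register/map/unmap/unregister sequence.
    cudaGLRegisterBufferObject(vbo);
    void *devPtr = NULL;
    cudaGLMapBufferObject(&devPtr, vbo);   // devPtr is valid only while mapped
    // ... kernels may read or write devPtr ...
    cudaGLUnmapBufferObject(vbo);
    cudaGLUnregisterBufferObject(vbo);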
+
364
+ /**
365
+ * \brief Unmaps a buffer object for access by CUDA
366
+ *
367
+ * \deprecated This function is deprecated as of CUDA 3.0.
368
+ *
369
+ * Unmaps the buffer object of ID \p bufObj for access by CUDA. When
370
+ * a buffer is unmapped, the base address returned by
371
+ * ::cudaGLMapBufferObject() is invalid and subsequent references to
372
+ * the address result in undefined behavior. The OpenGL context used
373
+ * to create the buffer, or another context from the same share group,
374
+ * must be bound to the current thread when this is called.
375
+ *
376
+ * All streams in the current thread are synchronized with the current
377
+ * GL context.
378
+ *
379
+ * \param bufObj - Buffer object to unmap
380
+ *
381
+ * \return
382
+ * ::cudaSuccess,
383
+ * ::cudaErrorUnmapBufferObjectFailed
384
+ * \notefnerr
385
+ *
386
+ * \sa ::cudaGraphicsUnmapResources
387
+ */
388
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObject(GLuint bufObj);
389
+
390
+ /**
391
+ * \brief Unregisters a buffer object for access by CUDA
392
+ *
393
+ * \deprecated This function is deprecated as of CUDA 3.0.
394
+ *
395
+ * Unregisters the buffer object of ID \p bufObj for access by CUDA
396
+ * and releases any CUDA resources associated with the buffer. Once a
397
+ * buffer is unregistered, it may no longer be mapped by CUDA. The GL
398
+ * context used to create the buffer, or another context from the
399
+ * same share group, must be bound to the current thread when this is
400
+ * called.
401
+ *
402
+ * \param bufObj - Buffer object to unregister
403
+ *
404
+ * \return
405
+ * ::cudaSuccess
406
+ * \notefnerr
407
+ *
408
+ * \sa ::cudaGraphicsUnregisterResource
409
+ */
410
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnregisterBufferObject(GLuint bufObj);
411
+
412
+ /**
413
+ * \brief Set usage flags for mapping an OpenGL buffer
414
+ *
415
+ * \deprecated This function is deprecated as of CUDA 3.0.
416
+ *
417
+ * Set flags for mapping the OpenGL buffer \p bufObj
418
+ *
419
+ * Changes to flags will take effect the next time \p bufObj is mapped.
420
+ * The \p flags argument may be any of the following:
421
+ *
422
+ * - ::cudaGLMapFlagsNone: Specifies no hints about how this buffer will
423
+ * be used. It is therefore assumed that this buffer will be read from and
424
+ * written to by CUDA kernels. This is the default value.
425
+ * - ::cudaGLMapFlagsReadOnly: Specifies that CUDA kernels which access this
426
+ * buffer will not write to the buffer.
427
+ * - ::cudaGLMapFlagsWriteDiscard: Specifies that CUDA kernels which access
428
+ * this buffer will not read from the buffer and will write over the
429
+ * entire contents of the buffer, so none of the data previously stored in
430
+ * the buffer will be preserved.
431
+ *
432
+ * If \p bufObj has not been registered for use with CUDA, then
433
+ * ::cudaErrorInvalidResourceHandle is returned. If \p bufObj is presently
434
+ * mapped for access by CUDA, then ::cudaErrorUnknown is returned.
435
+ *
436
+ * \param bufObj - Registered buffer object to set flags for
437
+ * \param flags - Parameters for buffer mapping
438
+ *
439
+ * \return
440
+ * ::cudaSuccess,
441
+ * ::cudaErrorInvalidValue,
442
+ * ::cudaErrorInvalidResourceHandle,
443
+ * ::cudaErrorUnknown
444
+ * \notefnerr
445
+ *
446
+ * \sa ::cudaGraphicsResourceSetMapFlags
447
+ */
448
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetBufferObjectMapFlags(GLuint bufObj, unsigned int flags);
449
+
450
+ /**
451
+ * \brief Maps a buffer object for access by CUDA
452
+ *
453
+ * \deprecated This function is deprecated as of CUDA 3.0.
454
+ *
455
+ * Maps the buffer object of ID \p bufObj into the address space of
456
+ * CUDA and returns in \p *devPtr the base pointer of the resulting
457
+ * mapping. The buffer must have previously been registered by
458
+ * calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
459
+ * by CUDA, any OpenGL operation which references the buffer will
460
+ * result in undefined behavior. The OpenGL context used to create
461
+ * the buffer, or another context from the same share group, must be
462
+ * bound to the current thread when this is called.
463
+ *
464
+ * Stream \p stream is synchronized with the current GL context.
465
+ *
466
+ * \param devPtr - Returned device pointer to CUDA object
467
+ * \param bufObj - Buffer object ID to map
468
+ * \param stream - Stream to synchronize
469
+ *
470
+ * \return
471
+ * ::cudaSuccess,
472
+ * ::cudaErrorMapBufferObjectFailed
473
+ * \notefnerr
474
+ *
475
+ * \sa ::cudaGraphicsMapResources
476
+ */
477
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObjectAsync(void **devPtr, GLuint bufObj, cudaStream_t stream);
478
+
479
+ /**
480
+ * \brief Unmaps a buffer object for access by CUDA
481
+ *
482
+ * \deprecated This function is deprecated as of CUDA 3.0.
483
+ *
484
+ * Unmaps the buffer object of ID \p bufObj for access by CUDA. When
485
+ * a buffer is unmapped, the base address returned by
486
+ * ::cudaGLMapBufferObject() is invalid and subsequent references to
487
+ * the address result in undefined behavior. The OpenGL context used
488
+ * to create the buffer, or another context from the same share group,
489
+ * must be bound to the current thread when this is called.
490
+ *
491
+ * Stream \p stream is synchronized with the current GL context.
492
+ *
493
+ * \param bufObj - Buffer object to unmap
494
+ * \param stream - Stream to synchronize
495
+ *
496
+ * \return
497
+ * ::cudaSuccess,
498
+ * ::cudaErrorUnmapBufferObjectFailed
499
+ * \notefnerr
500
+ *
501
+ * \sa ::cudaGraphicsUnmapResources
502
+ */
503
+ extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObjectAsync(GLuint bufObj, cudaStream_t stream);
504
+
505
+ /** @} */ /* END CUDART_OPENGL_DEPRECATED */
506
+
507
+ #if defined(__cplusplus)
508
+ }
509
+ #endif /* __cplusplus */
510
+
511
+ #undef __CUDA_DEPRECATED
512
+
513
+ #endif /* __CUDA_GL_INTEROP_H__ */
514
+
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h ADDED
@@ -0,0 +1,1958 @@
1
+ /*
2
+ * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * CUDA Occupancy Calculator
52
+ *
53
+ * NAME
54
+ *
55
+ * cudaOccMaxActiveBlocksPerMultiprocessor,
56
+ * cudaOccMaxPotentialOccupancyBlockSize,
57
+ * cudaOccMaxPotentialOccupancyBlockSizeVariableSMem
58
+ * cudaOccAvailableDynamicSMemPerBlock
59
+ *
60
+ * DESCRIPTION
61
+ *
62
+ * The CUDA occupancy calculator provides a standalone, programmatic
63
+ * interface to compute the occupancy of a function on a device. It can also
64
+ * provide occupancy-oriented launch configuration suggestions.
65
+ *
66
+ * The function and device are defined by the user through
67
+ * cudaOccFuncAttributes, cudaOccDeviceProp, and cudaOccDeviceState
68
+ * structures. All APIs require all 3 of them.
69
+ *
70
+ * See the structure definition for more details about the device / function
71
+ * descriptors.
72
+ *
73
+ * See each API's prototype for API usage.
74
+ *
75
+ * COMPATIBILITY
76
+ *
77
+ * The occupancy calculator will be updated on each major CUDA toolkit
78
+ * release. It does not provide forward compatibility, i.e. new hardware
79
+ * released after this implementation's release will not be supported.
80
+ *
81
+ * NOTE
82
+ *
83
+ * If there is access to CUDA runtime, and the sole intent is to calculate
84
+ * occupancy related values on one of the accessible CUDA devices, using CUDA
85
+ * runtime's occupancy calculation APIs is recommended.
86
+ *
87
+ */
88
+
89
+ #ifndef __cuda_occupancy_h__
90
+ #define __cuda_occupancy_h__
91
+
92
+ #include <stddef.h>
93
+ #include <limits.h>
94
+ #include <string.h>
95
+
96
+
97
+ // __OCC_INLINE will be undefined at the end of this header
98
+ //
99
+ #ifdef __CUDACC__
100
+ #define __OCC_INLINE inline __host__ __device__
101
+ #elif defined _MSC_VER
102
+ #define __OCC_INLINE __inline
103
+ #else // GNUCC assumed
104
+ #define __OCC_INLINE inline
105
+ #endif
106
+
107
+ enum cudaOccError_enum {
108
+ CUDA_OCC_SUCCESS = 0, // no error encountered
109
+ CUDA_OCC_ERROR_INVALID_INPUT = 1, // input parameter is invalid
110
+ CUDA_OCC_ERROR_UNKNOWN_DEVICE = 2, // requested device is not supported in
111
+ // current implementation or device is
112
+ // invalid
113
+ };
114
+ typedef enum cudaOccError_enum cudaOccError;
115
+
116
+ typedef struct cudaOccResult cudaOccResult;
117
+ typedef struct cudaOccDeviceProp cudaOccDeviceProp;
118
+ typedef struct cudaOccFuncAttributes cudaOccFuncAttributes;
119
+ typedef struct cudaOccDeviceState cudaOccDeviceState;
120
+
121
+ /**
122
+ * The CUDA occupancy calculator computes the occupancy of the function
123
+ * described by attributes with the given block size (blockSize), static device
124
+ * properties (properties), dynamic device states (states) and per-block dynamic
125
+ * shared memory allocation (dynamicSMemSize) in bytes, and outputs it through
126
+ * result along with other useful information. The occupancy is computed in
127
+ * terms of the maximum number of active blocks per multiprocessor. The user can
128
+ * then convert it to other metrics, such as number of active warps.
129
+ *
130
+ * RETURN VALUE
131
+ *
132
+ * The occupancy and related information is returned through result.
133
+ *
134
+ * If result->activeBlocksPerMultiprocessor is 0, then the given parameter
135
+ * combination cannot run on the device.
136
+ *
137
+ * ERRORS
138
+ *
139
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
140
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
141
+ * current implementation or device is invalid
142
+ */
143
+ static __OCC_INLINE
144
+ cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
145
+ cudaOccResult *result, // out
146
+ const cudaOccDeviceProp *properties, // in
147
+ const cudaOccFuncAttributes *attributes, // in
148
+ const cudaOccDeviceState *state, // in
149
+ int blockSize, // in
150
+ size_t dynamicSmemSize); // in
151
+
152
+ /**
153
+ * The CUDA launch configurator C API suggests a grid / block size pair (in
154
+ * minGridSize and blockSize) that achieves the best potential occupancy
155
+ * (i.e. maximum number of active warps with the smallest number of blocks) for
156
+ * the given function described by attributes, on a device described by
157
+ * properties with settings in state.
158
+ *
159
+ * If per-block dynamic shared memory allocation is not needed, the user should
160
+ * leave blockSizeToDynamicSMemSize as NULL and dynamicSMemSize as 0.
161
+ *
162
+ * If per-block dynamic shared memory allocation is needed, then if the dynamic
163
+ * shared memory size is constant regardless of block size, the size should be
164
+ * passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be
165
+ * NULL.
166
+ *
167
+ * Otherwise, if the per-block dynamic shared memory size varies with different
168
+ * block sizes, the user needs to provide a pointer to a unary function through
169
+ * blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by
170
+ * a block of the function for any given block size. dynamicSMemSize is
171
+ * ignored. An example signature is:
172
+ *
173
+ * // Take block size, returns dynamic shared memory needed
174
+ * size_t blockToSmem(int blockSize);
175
+ *
176
+ * RETURN VALUE
177
+ *
178
+ * The suggested block size and the minimum number of blocks needed to achieve
179
+ * the maximum occupancy are returned through blockSize and minGridSize.
180
+ *
181
+ * If *blockSize is 0, then the given combination cannot run on the device.
182
+ *
183
+ * ERRORS
184
+ *
185
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
186
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
187
+ * current implementation or device is invalid
188
+ *
189
+ */
190
+ static __OCC_INLINE
191
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
192
+ int *minGridSize, // out
193
+ int *blockSize, // out
194
+ const cudaOccDeviceProp *properties, // in
195
+ const cudaOccFuncAttributes *attributes, // in
196
+ const cudaOccDeviceState *state, // in
197
+ size_t (*blockSizeToDynamicSMemSize)(int), // in
198
+ size_t dynamicSMemSize); // in
199
+
200
+ /**
201
+ * The CUDA launch configurator C++ API suggests a grid / block size pair (in
202
+ * minGridSize and blockSize) that achieves the best potential occupancy
203
+ * (i.e. the maximum number of active warps with the smallest number of blocks)
204
+ * for the given function described by attributes, on a device described by
205
+ * properties with settings in state.
206
+ *
207
+ * If per-block dynamic shared memory allocation is 0 or constant regardless of
208
+ * block size, the user can use cudaOccMaxPotentialOccupancyBlockSize to
209
+ * configure the launch. A constant dynamic shared memory allocation size in
210
+ * bytes can be passed through dynamicSMemSize.
211
+ *
212
+ * Otherwise, if the per-block dynamic shared memory size varies with different
213
+ * block sizes, the user needs to use
214
+ * cudaOccMaxPotentialOccupancyBlockSizeVariableSmem instead, and provide a
215
+ * functor / pointer to an unary function (blockSizeToDynamicSMemSize) that
216
+ * computes the dynamic shared memory needed by func for any given block
217
+ * size. An example signature is:
218
+ *
219
+ * // Take block size, returns per-block dynamic shared memory needed
220
+ * size_t blockToSmem(int blockSize);
221
+ *
222
+ * RETURN VALUE
223
+ *
224
+ * The suggested block size and the minimum number of blocks needed to achieve
225
+ * the maximum occupancy are returned through blockSize and minGridSize.
226
+ *
227
+ * If *blockSize is 0, then the given combination cannot run on the device.
228
+ *
229
+ * ERRORS
230
+ *
231
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
232
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
233
+ * current implementation or device is invalid
234
+ *
235
+ */
236
+
237
+ #if defined(__cplusplus)
238
+ namespace {
239
+
240
+ __OCC_INLINE
241
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
242
+ int *minGridSize, // out
243
+ int *blockSize, // out
244
+ const cudaOccDeviceProp *properties, // in
245
+ const cudaOccFuncAttributes *attributes, // in
246
+ const cudaOccDeviceState *state, // in
247
+ size_t dynamicSMemSize = 0); // in
248
+
249
+ template <typename UnaryFunction>
250
+ __OCC_INLINE
251
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
252
+ int *minGridSize, // out
253
+ int *blockSize, // out
254
+ const cudaOccDeviceProp *properties, // in
255
+ const cudaOccFuncAttributes *attributes, // in
256
+ const cudaOccDeviceState *state, // in
257
+ UnaryFunction blockSizeToDynamicSMemSize); // in
258
+
259
+ } // namespace anonymous
260
+ #endif // defined(__cplusplus)
261
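A C++ sketch of the variable-shared-memory variant declared above, using a lambda as the unary functor (any callable taking an int block size and returning a size_t works); the descriptors are assumed to come from the earlier sketch.

    int minGridSize = 0, blockSize = 0;
    cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
        &minGridSize, &blockSize, &occProp, &occAttr, &occState,
        [](int block) { return (size_t)block * sizeof(float); });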
+
262
+ /**
263
+ *
264
+ * The CUDA dynamic shared memory calculator computes the maximum size of
265
+ * per-block dynamic shared memory if we want to place numBlocks blocks
266
+ * on an SM.
267
+ *
268
+ * RETURN VALUE
269
+ *
270
+ * Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow
271
+ * numBlocks blocks per SM.
272
+ *
273
+ * ERRORS
274
+ *
275
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
276
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
277
+ * current implementation or device is invalid
278
+ *
279
+ */
280
+ static __OCC_INLINE
281
+ cudaOccError cudaOccAvailableDynamicSMemPerBlock(
282
+ size_t *dynamicSmemSize,
283
+ const cudaOccDeviceProp *properties,
284
+ const cudaOccFuncAttributes *attributes,
285
+ const cudaOccDeviceState *state,
286
+ int numBlocks,
287
+ int blockSize);
288
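A brief sketch, reusing the descriptors from the earlier sketch; the target of 2 blocks of 256 threads per SM is an illustrative assumption.

    size_t maxDynSmem = 0;
    cudaOccAvailableDynamicSMemPerBlock(&maxDynSmem, &occProp, &occAttr, &occState,
                                        /*numBlocks=*/2, /*blockSize=*/256);
    // maxDynSmem is the largest per-block dynamic allocation that still
    // allows 2 resident blocks per SM.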
+
289
+ /**
290
+ * Data structures
291
+ *
292
+ * These structures are subject to change for future architecture and CUDA
293
+ * releases. C users should initialize the structure as {0}.
294
+ *
295
+ */
296
+
297
+ /**
298
+ * Device descriptor
299
+ *
300
+ * This structure describes a device.
301
+ */
302
+ struct cudaOccDeviceProp {
303
+ int computeMajor; // Compute capability major version
304
+ int computeMinor; // Compute capability minor
305
+ // version. An unsupported minor version
306
+ // may cause an error
307
+ int maxThreadsPerBlock; // Maximum number of threads per block
308
+ int maxThreadsPerMultiprocessor; // Maximum number of threads per SM
309
+ // i.e. (Max. number of warps) x (warp
310
+ // size)
311
+ int regsPerBlock; // Maximum number of registers per block
312
+ int regsPerMultiprocessor; // Maximum number of registers per SM
313
+ int warpSize; // Warp size
314
+ size_t sharedMemPerBlock; // Maximum shared memory size per block
315
+ size_t sharedMemPerMultiprocessor; // Maximum shared memory size per SM
316
+ int numSms; // Number of SMs available
317
+ size_t sharedMemPerBlockOptin; // Maximum optin shared memory size per block
318
+ size_t reservedSharedMemPerBlock; // Shared memory per block reserved by driver
319
+
320
+ #ifdef __cplusplus
321
+ // This structure can be converted from a cudaDeviceProp structure for users
322
+ // that use this header in their CUDA applications.
323
+ //
324
+ // If the application has access to the CUDA Runtime API, the application
325
+ // can obtain the device properties of a CUDA device through
326
+ // cudaGetDeviceProperties, and initialize a cudaOccDeviceProp with the
327
+ // cudaDeviceProp structure.
328
+ //
329
+ // Example:
330
+ /*
331
+ {
332
+ cudaDeviceProp prop;
333
+
334
+ cudaGetDeviceProperties(&prop, ...);
335
+
336
+ cudaOccDeviceProp occProp = prop;
337
+
338
+ ...
339
+
340
+ cudaOccMaxPotentialOccupancyBlockSize(..., &occProp, ...);
341
+ }
342
+ */
343
+ //
344
+ template<typename DeviceProp>
345
+ __OCC_INLINE
346
+ cudaOccDeviceProp(const DeviceProp &props)
347
+ : computeMajor (props.major),
348
+ computeMinor (props.minor),
349
+ maxThreadsPerBlock (props.maxThreadsPerBlock),
350
+ maxThreadsPerMultiprocessor (props.maxThreadsPerMultiProcessor),
351
+ regsPerBlock (props.regsPerBlock),
352
+ regsPerMultiprocessor (props.regsPerMultiprocessor),
353
+ warpSize (props.warpSize),
354
+ sharedMemPerBlock (props.sharedMemPerBlock),
355
+ sharedMemPerMultiprocessor (props.sharedMemPerMultiprocessor),
356
+ numSms (props.multiProcessorCount),
357
+ sharedMemPerBlockOptin (props.sharedMemPerBlockOptin),
358
+ reservedSharedMemPerBlock (props.reservedSharedMemPerBlock)
359
+ {}
360
+
361
+ __OCC_INLINE
362
+ cudaOccDeviceProp()
363
+ : computeMajor (0),
364
+ computeMinor (0),
365
+ maxThreadsPerBlock (0),
366
+ maxThreadsPerMultiprocessor (0),
367
+ regsPerBlock (0),
368
+ regsPerMultiprocessor (0),
369
+ warpSize (0),
370
+ sharedMemPerBlock (0),
371
+ sharedMemPerMultiprocessor (0),
372
+ numSms (0),
373
+ sharedMemPerBlockOptin (0),
374
+ reservedSharedMemPerBlock (0)
375
+ {}
376
+ #endif // __cplusplus
377
+ };
378
+
379
+ /**
380
+ * Partitioned global caching option
381
+ */
382
+ typedef enum cudaOccPartitionedGCConfig_enum {
383
+ PARTITIONED_GC_OFF, // Disable partitioned global caching
384
+ PARTITIONED_GC_ON, // Prefer partitioned global caching
385
+ PARTITIONED_GC_ON_STRICT // Force partitioned global caching
386
+ } cudaOccPartitionedGCConfig;
387
+
388
+ /**
389
+ * Per function opt in maximum dynamic shared memory limit
390
+ */
391
+ typedef enum cudaOccFuncShmemConfig_enum {
392
+ FUNC_SHMEM_LIMIT_DEFAULT, // Default shmem limit
393
+ FUNC_SHMEM_LIMIT_OPTIN, // Use the optin shmem limit
394
+ } cudaOccFuncShmemConfig;
395
+
396
+ /**
397
+ * Function descriptor
398
+ *
399
+ * This structure describes a CUDA function.
400
+ */
401
+ struct cudaOccFuncAttributes {
402
+ int maxThreadsPerBlock; // Maximum block size the function can work with. If
403
+ // unlimited, use INT_MAX or any value greater than
404
+ // or equal to maxThreadsPerBlock of the device
405
+ int numRegs; // Number of registers used. When the function is
406
+ // launched on device, the register count may change
407
+ // due to internal tools requirements.
408
+ size_t sharedSizeBytes; // Amount of static shared memory used, in bytes
409
+
410
+ cudaOccPartitionedGCConfig partitionedGCConfig;
411
+ // Partitioned global caching is required to enable
412
+ // caching on certain chips, such as sm_52
413
+ // devices. Partitioned global caching can be
414
+ // automatically disabled if the occupancy
415
+ // requirement of the launch cannot support caching.
416
+ //
417
+ // To override this behavior with caching on and
418
+ // calculate occupancy strictly according to the
419
+ // preference, set partitionedGCConfig to
420
+ // PARTITIONED_GC_ON_STRICT. This is especially
421
+ // useful for experimenting and finding launch
422
+ // configurations (MaxPotentialOccupancyBlockSize)
423
+ // that allow global caching to take effect.
424
+ //
425
+ // This flag only affects the occupancy calculation.
426
+
427
+ cudaOccFuncShmemConfig shmemLimitConfig;
428
+ // Certain chips like sm_70 allow a user to opt into
429
+ // a higher per-block limit of dynamic shared memory.
430
+ // This opt-in is performed on a per-function basis
431
+ // using the cuFuncSetAttribute function
432
+
433
+ size_t maxDynamicSharedSizeBytes;
434
+ // User set limit on maximum dynamic shared memory
435
+ // usable by the kernel
436
+ // This limit is set using the cuFuncSetAttribute
437
+ // function.
438
+
439
+ int numBlockBarriers; // Number of block barriers used (default to 1)
440
+ #ifdef __cplusplus
441
+ // This structure can be converted from a cudaFuncAttributes structure for
442
+ // users that use this header in their CUDA applications.
443
+ //
444
+ // If the application has access to the CUDA Runtime API, the application
445
+ // can obtain the function attributes of a CUDA kernel function through
446
+ // cudaFuncGetAttributes, and initialize a cudaOccFuncAttributes with the
447
+ // cudaFuncAttributes structure.
448
+ //
449
+ // Example:
450
+ /*
451
+ __global__ void foo() {...}
452
+
453
+ ...
454
+
455
+ {
456
+ cudaFuncAttributes attr;
457
+
458
+ cudaFuncGetAttributes(&attr, foo);
459
+
460
+ cudaOccFuncAttributes occAttr = attr;
461
+
462
+ ...
463
+
464
+ cudaOccMaxPotentialOccupancyBlockSize(..., &occAttr, ...);
465
+ }
466
+ */
467
+ //
468
+ template<typename FuncAttributes>
469
+ __OCC_INLINE
470
+ cudaOccFuncAttributes(const FuncAttributes &attr)
471
+ : maxThreadsPerBlock (attr.maxThreadsPerBlock),
472
+ numRegs (attr.numRegs),
473
+ sharedSizeBytes (attr.sharedSizeBytes),
474
+ partitionedGCConfig (PARTITIONED_GC_OFF),
475
+ shmemLimitConfig (FUNC_SHMEM_LIMIT_OPTIN),
476
+ maxDynamicSharedSizeBytes (attr.maxDynamicSharedSizeBytes),
477
+ numBlockBarriers (1)
478
+ {}
479
+
480
+ __OCC_INLINE
481
+ cudaOccFuncAttributes()
482
+ : maxThreadsPerBlock (0),
483
+ numRegs (0),
484
+ sharedSizeBytes (0),
485
+ partitionedGCConfig (PARTITIONED_GC_OFF),
486
+ shmemLimitConfig (FUNC_SHMEM_LIMIT_DEFAULT),
487
+ maxDynamicSharedSizeBytes (0),
488
+ numBlockBarriers (0)
489
+ {}
490
+ #endif
491
+ };
492
+
493
+ typedef enum cudaOccCacheConfig_enum {
494
+ CACHE_PREFER_NONE = 0x00, // no preference for shared memory or L1 (default)
495
+ CACHE_PREFER_SHARED = 0x01, // prefer larger shared memory and smaller L1 cache
496
+ CACHE_PREFER_L1 = 0x02, // prefer larger L1 cache and smaller shared memory
497
+ CACHE_PREFER_EQUAL = 0x03 // prefer equal sized L1 cache and shared memory
498
+ } cudaOccCacheConfig;
499
+
500
+ typedef enum cudaOccCarveoutConfig_enum {
501
+ SHAREDMEM_CARVEOUT_DEFAULT = -1, // no preference for shared memory or L1 (default)
502
+ SHAREDMEM_CARVEOUT_MAX_SHARED = 100, // prefer maximum available shared memory, minimum L1 cache
503
+ SHAREDMEM_CARVEOUT_MAX_L1 = 0, // prefer maximum available L1 cache, minimum shared memory
504
+ SHAREDMEM_CARVEOUT_HALF = 50 // prefer half of maximum available shared memory, with the rest as L1 cache
505
+ } cudaOccCarveoutConfig;
506
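A small sketch of how these preferences feed the calculator; the fields belong to cudaOccDeviceState, defined below, and the chosen values are illustrative.

    // Pre-Volta devices honor the cache configuration preference...
    occState.cacheConfig    = CACHE_PREFER_SHARED;
    // ...while Volta and newer honor the carveout preference instead.
    occState.carveoutConfig = SHAREDMEM_CARVEOUT_HALF;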
+
507
+ /**
508
+ * Device state descriptor
509
+ *
510
+ * This structure describes device settings that affect occupancy calculation.
511
+ */
512
+ struct cudaOccDeviceState
513
+ {
514
+ // Cache / shared memory split preference. Deprecated on Volta
515
+ cudaOccCacheConfig cacheConfig;
516
+ // Shared memory / L1 split preference. Supported on only Volta
517
+ int carveoutConfig;
518
+
519
+ #ifdef __cplusplus
520
+ __OCC_INLINE
521
+ cudaOccDeviceState()
522
+ : cacheConfig (CACHE_PREFER_NONE),
523
+ carveoutConfig (SHAREDMEM_CARVEOUT_DEFAULT)
524
+ {}
525
+ #endif
526
+ };
527
+
528
+ typedef enum cudaOccLimitingFactor_enum {
529
+ // Occupancy limited due to:
530
+ OCC_LIMIT_WARPS = 0x01, // - warps available
531
+ OCC_LIMIT_REGISTERS = 0x02, // - registers available
532
+ OCC_LIMIT_SHARED_MEMORY = 0x04, // - shared memory available
533
+ OCC_LIMIT_BLOCKS = 0x08, // - blocks available
534
+ OCC_LIMIT_BARRIERS = 0x10 // - barriers available
535
+ } cudaOccLimitingFactor;
536
+
537
+ /**
538
+ * Occupancy output
539
+ *
540
+ * This structure contains occupancy calculator's output.
541
+ */
542
+ struct cudaOccResult {
543
+ int activeBlocksPerMultiprocessor; // Occupancy
544
+ unsigned int limitingFactors; // Factors that limited occupancy. A bit
545
+ // field that records the limiting
546
+ // factors, see cudaOccLimitingFactor
547
+ int blockLimitRegs; // Occupancy due to register
548
+ // usage, INT_MAX if the kernel does not
549
+ // use any register.
550
+ int blockLimitSharedMem; // Occupancy due to shared memory
551
+ // usage, INT_MAX if the kernel does not
552
+ // use shared memory.
553
+ int blockLimitWarps; // Occupancy due to block size limit
554
+ int blockLimitBlocks; // Occupancy due to maximum number of blocks
555
+ // manageable per SM
556
+ int blockLimitBarriers; // Occupancy due to block barrier usage
557
+ int allocatedRegistersPerBlock; // Actual number of registers allocated per
558
+ // block
559
+ size_t allocatedSharedMemPerBlock; // Actual size of shared memory allocated
560
+ // per block
561
+ cudaOccPartitionedGCConfig partitionedGCConfig;
562
+ // Report if partitioned global caching
563
+ // is actually enabled.
564
+ };
565
+
566
+ /**
567
+ * Partitioned global caching support
568
+ *
569
+ * See cudaOccPartitionedGlobalCachingModeSupport
570
+ */
571
+ typedef enum cudaOccPartitionedGCSupport_enum {
572
+ PARTITIONED_GC_NOT_SUPPORTED, // Partitioned global caching is not supported
573
+ PARTITIONED_GC_SUPPORTED, // Partitioned global caching is supported
574
+ } cudaOccPartitionedGCSupport;
575
+
576
+ /**
577
+ * Implementation
578
+ */
579
+
580
+ /**
581
+ * Max compute capability supported
582
+ */
583
+ #define __CUDA_OCC_MAJOR__ 9
584
+ #define __CUDA_OCC_MINOR__ 0
585
+
586
+ //////////////////////////////////////////
587
+ // Mathematical Helper Functions //
588
+ //////////////////////////////////////////
589
+
590
+ static __OCC_INLINE int __occMin(int lhs, int rhs)
591
+ {
592
+ return rhs < lhs ? rhs : lhs;
593
+ }
594
+
595
+ static __OCC_INLINE int __occDivideRoundUp(int x, int y)
596
+ {
597
+ return (x + (y - 1)) / y;
598
+ }
599
+
600
+ static __OCC_INLINE int __occRoundUp(int x, int y)
601
+ {
602
+ return y * __occDivideRoundUp(x, y);
603
+ }
604
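A worked example of how these helpers are used for allocation rounding later in this file (numbers illustrative): with 30 registers per thread and a warp size of 32, a warp nominally needs 960 registers, and __occRoundUp(960, 256) yields 1024 registers actually allocated per warp.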
+
605
+ //////////////////////////////////////////
606
+ // Architectural Properties //
607
+ //////////////////////////////////////////
608
+
609
+ /**
610
+ * Granularity of shared memory allocation
611
+ */
612
+ static __OCC_INLINE cudaOccError cudaOccSMemAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
613
+ {
614
+ int value;
615
+
616
+ switch(properties->computeMajor) {
617
+ case 3:
618
+ case 5:
619
+ case 6:
620
+ case 7:
621
+ value = 256;
622
+ break;
623
+ case 8:
624
+ case 9:
625
+ value = 128;
626
+ break;
627
+ default:
628
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
629
+ }
630
+
631
+ *limit = value;
632
+
633
+ return CUDA_OCC_SUCCESS;
634
+ }
635
+
636
+ /**
637
+ * Maximum number of registers per thread
638
+ */
639
+ static __OCC_INLINE cudaOccError cudaOccRegAllocationMaxPerThread(int *limit, const cudaOccDeviceProp *properties)
640
+ {
641
+ int value;
642
+
643
+ switch(properties->computeMajor) {
644
+ case 3:
645
+ case 5:
646
+ case 6:
647
+ value = 255;
648
+ break;
649
+ case 7:
650
+ case 8:
651
+ case 9:
652
+ value = 256;
653
+ break;
654
+ default:
655
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
656
+ }
657
+
658
+ *limit = value;
659
+
660
+ return CUDA_OCC_SUCCESS;
661
+ }
662
+
663
+ /**
664
+ * Granularity of register allocation
665
+ */
666
+ static __OCC_INLINE cudaOccError cudaOccRegAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
667
+ {
668
+ int value;
669
+
670
+ switch(properties->computeMajor) {
671
+ case 3:
672
+ case 5:
673
+ case 6:
674
+ case 7:
675
+ case 8:
676
+ case 9:
677
+ value = 256;
678
+ break;
679
+ default:
680
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
681
+ }
682
+
683
+ *limit = value;
684
+
685
+ return CUDA_OCC_SUCCESS;
686
+ }
687
+
688
+ /**
689
+ * Number of sub-partitions
690
+ */
691
+ static __OCC_INLINE cudaOccError cudaOccSubPartitionsPerMultiprocessor(int *limit, const cudaOccDeviceProp *properties)
692
+ {
693
+ int value;
694
+
695
+ switch(properties->computeMajor) {
696
+ case 3:
697
+ case 5:
698
+ case 7:
699
+ case 8:
700
+ case 9:
701
+ value = 4;
702
+ break;
703
+ case 6:
704
+ value = properties->computeMinor ? 4 : 2;
705
+ break;
706
+ default:
707
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
708
+ }
709
+
710
+ *limit = value;
711
+
712
+ return CUDA_OCC_SUCCESS;
713
+ }
714
+
715
+
716
+ /**
717
+ * Maximum number of blocks that can run simultaneously on a multiprocessor
718
+ */
719
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerMultiprocessor(int* limit, const cudaOccDeviceProp *properties)
720
+ {
721
+ int value;
722
+
723
+ switch(properties->computeMajor) {
724
+ case 3:
725
+ value = 16;
726
+ break;
727
+ case 5:
728
+ case 6:
729
+ value = 32;
730
+ break;
731
+ case 7: {
732
+ int isTuring = properties->computeMinor == 5;
733
+ value = (isTuring) ? 16 : 32;
734
+ break;
735
+ }
736
+ case 8:
737
+ if (properties->computeMinor == 0) {
738
+ value = 32;
739
+ }
740
+ else if (properties->computeMinor == 9) {
741
+ value = 24;
742
+ }
743
+ else {
744
+ value = 16;
745
+ }
746
+ break;
747
+ case 9:
748
+ value = 32;
749
+ break;
750
+ default:
751
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
752
+ }
753
+
754
+ *limit = value;
755
+
756
+ return CUDA_OCC_SUCCESS;
757
+ }
758
+
759
+ /**
760
+ * Align up shared memory based on compute major configurations
761
+ */
762
+ static __OCC_INLINE cudaOccError cudaOccAlignUpShmemSizeVoltaPlus(size_t *shMemSize, const cudaOccDeviceProp *properties)
763
+ {
764
+ // Volta and Turing have shared L1 cache / shared memory, and support cache
765
+ // configuration to trade one for the other. These values are needed to
766
+ // map carveout config ratio to the next available architecture size
767
+ size_t size = *shMemSize;
768
+
769
+ switch (properties->computeMajor) {
770
+ case 7: {
771
+ // Turing supports 32KB and 64KB shared mem.
772
+ int isTuring = properties->computeMinor == 5;
773
+ if (isTuring) {
774
+ if (size <= 32 * 1024) {
775
+ *shMemSize = 32 * 1024;
776
+ }
777
+ else if (size <= 64 * 1024) {
778
+ *shMemSize = 64 * 1024;
779
+ }
780
+ else {
781
+ return CUDA_OCC_ERROR_INVALID_INPUT;
782
+ }
783
+ }
784
+ // Volta supports 0KB, 8KB, 16KB, 32KB, 64KB, and 96KB shared mem.
785
+ else {
786
+ if (size == 0) {
787
+ *shMemSize = 0;
788
+ }
789
+ else if (size <= 8 * 1024) {
790
+ *shMemSize = 8 * 1024;
791
+ }
792
+ else if (size <= 16 * 1024) {
793
+ *shMemSize = 16 * 1024;
794
+ }
795
+ else if (size <= 32 * 1024) {
796
+ *shMemSize = 32 * 1024;
797
+ }
798
+ else if (size <= 64 * 1024) {
799
+ *shMemSize = 64 * 1024;
800
+ }
801
+ else if (size <= 96 * 1024) {
802
+ *shMemSize = 96 * 1024;
803
+ }
804
+ else {
805
+ return CUDA_OCC_ERROR_INVALID_INPUT;
806
+ }
807
+ }
808
+ break;
809
+ }
810
+ case 8:
811
+ if (properties->computeMinor == 0 || properties->computeMinor == 7) {
812
+ if (size == 0) {
813
+ *shMemSize = 0;
814
+ }
815
+ else if (size <= 8 * 1024) {
816
+ *shMemSize = 8 * 1024;
817
+ }
818
+ else if (size <= 16 * 1024) {
819
+ *shMemSize = 16 * 1024;
820
+ }
821
+ else if (size <= 32 * 1024) {
822
+ *shMemSize = 32 * 1024;
823
+ }
824
+ else if (size <= 64 * 1024) {
825
+ *shMemSize = 64 * 1024;
826
+ }
827
+ else if (size <= 100 * 1024) {
828
+ *shMemSize = 100 * 1024;
829
+ }
830
+ else if (size <= 132 * 1024) {
831
+ *shMemSize = 132 * 1024;
832
+ }
833
+ else if (size <= 164 * 1024) {
834
+ *shMemSize = 164 * 1024;
835
+ }
836
+ else {
837
+ return CUDA_OCC_ERROR_INVALID_INPUT;
838
+ }
839
+ }
840
+ else {
841
+ if (size == 0) {
842
+ *shMemSize = 0;
843
+ }
844
+ else if (size <= 8 * 1024) {
845
+ *shMemSize = 8 * 1024;
846
+ }
847
+ else if (size <= 16 * 1024) {
848
+ *shMemSize = 16 * 1024;
849
+ }
850
+ else if (size <= 32 * 1024) {
851
+ *shMemSize = 32 * 1024;
852
+ }
853
+ else if (size <= 64 * 1024) {
854
+ *shMemSize = 64 * 1024;
855
+ }
856
+ else if (size <= 100 * 1024) {
857
+ *shMemSize = 100 * 1024;
858
+ }
859
+ else {
860
+ return CUDA_OCC_ERROR_INVALID_INPUT;
861
+ }
862
+ }
863
+ break;
864
+ case 9: {
865
+ if (size == 0) {
866
+ *shMemSize = 0;
867
+ }
868
+ else if (size <= 8 * 1024) {
869
+ *shMemSize = 8 * 1024;
870
+ }
871
+ else if (size <= 16 * 1024) {
872
+ *shMemSize = 16 * 1024;
873
+ }
874
+ else if (size <= 32 * 1024) {
875
+ *shMemSize = 32 * 1024;
876
+ }
877
+ else if (size <= 64 * 1024) {
878
+ *shMemSize = 64 * 1024;
879
+ }
880
+ else if (size <= 100 * 1024) {
881
+ *shMemSize = 100 * 1024;
882
+ }
883
+ else if (size <= 132 * 1024) {
884
+ *shMemSize = 132 * 1024;
885
+ }
886
+ else if (size <= 164 * 1024) {
887
+ *shMemSize = 164 * 1024;
888
+ }
889
+ else if (size <= 196 * 1024) {
890
+ *shMemSize = 196 * 1024;
891
+ }
892
+ else if (size <= 228 * 1024) {
893
+ *shMemSize = 228 * 1024;
894
+ }
895
+ else {
896
+ return CUDA_OCC_ERROR_INVALID_INPUT;
897
+ }
898
+ break;
899
+ }
900
+ default:
901
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
902
+ }
903
+
904
+ return CUDA_OCC_SUCCESS;
905
+ }
906
+
907
+ /**
908
+ * Shared memory based on the new carveoutConfig API introduced with Volta
909
+ */
910
+ static __OCC_INLINE cudaOccError cudaOccSMemPreferenceVoltaPlus(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
911
+ {
912
+ cudaOccError status = CUDA_OCC_SUCCESS;
913
+ size_t preferenceShmemSize;
914
+
915
+ // CUDA 9.0 introduces a new API to set shared memory - L1 configuration on supported
916
+ // devices. This preference will take precedence over the older cacheConfig setting.
917
+ // Map cacheConfig to its effective preference value.
918
+ int effectivePreference = state->carveoutConfig;
919
+ if ((effectivePreference < SHAREDMEM_CARVEOUT_DEFAULT) || (effectivePreference > SHAREDMEM_CARVEOUT_MAX_SHARED)) {
920
+ return CUDA_OCC_ERROR_INVALID_INPUT;
921
+ }
922
+
923
+ if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
924
+ switch (state->cacheConfig)
925
+ {
926
+ case CACHE_PREFER_L1:
927
+ effectivePreference = SHAREDMEM_CARVEOUT_MAX_L1;
928
+ break;
929
+ case CACHE_PREFER_SHARED:
930
+ effectivePreference = SHAREDMEM_CARVEOUT_MAX_SHARED;
931
+ break;
932
+ case CACHE_PREFER_EQUAL:
933
+ effectivePreference = SHAREDMEM_CARVEOUT_HALF;
934
+ break;
935
+ default:
936
+ effectivePreference = SHAREDMEM_CARVEOUT_DEFAULT;
937
+ break;
938
+ }
939
+ }
940
+
941
+ if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
942
+ preferenceShmemSize = properties->sharedMemPerMultiprocessor;
943
+ }
944
+ else {
945
+ preferenceShmemSize = (size_t) (effectivePreference * properties->sharedMemPerMultiprocessor) / 100;
946
+ }
947
+
948
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&preferenceShmemSize, properties);
949
+ *limit = preferenceShmemSize;
950
+ return status;
951
+ }
952
+
953
+ /**
954
+ * Shared memory based on the cacheConfig
955
+ */
956
+ static __OCC_INLINE cudaOccError cudaOccSMemPreference(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
957
+ {
958
+ size_t bytes = 0;
959
+ size_t sharedMemPerMultiprocessorHigh = properties->sharedMemPerMultiprocessor;
960
+ cudaOccCacheConfig cacheConfig = state->cacheConfig;
961
+
962
+ // Kepler has shared L1 cache / shared memory, and supports cache
963
+ // configuration to trade one for the other. These values are needed to
964
+ // calculate the correct shared memory size for user requested cache
965
+ // configuration.
966
+ //
967
+ size_t minCacheSize = 16384;
968
+ size_t maxCacheSize = 49152;
969
+ size_t cacheAndSharedTotal = sharedMemPerMultiprocessorHigh + minCacheSize;
970
+ size_t sharedMemPerMultiprocessorLow = cacheAndSharedTotal - maxCacheSize;
971
+
972
+ switch (properties->computeMajor) {
973
+ case 3:
974
+ // Kepler supports 16KB, 32KB, or 48KB partitions for L1. The rest
975
+ // is shared memory.
976
+ //
977
+ switch (cacheConfig) {
978
+ default :
979
+ case CACHE_PREFER_NONE:
980
+ case CACHE_PREFER_SHARED:
981
+ bytes = sharedMemPerMultiprocessorHigh;
982
+ break;
983
+ case CACHE_PREFER_L1:
984
+ bytes = sharedMemPerMultiprocessorLow;
985
+ break;
986
+ case CACHE_PREFER_EQUAL:
987
+ // Equal is the mid-point between high and low. It should be
988
+ // equivalent to low + 16KB.
989
+ //
990
+ bytes = (sharedMemPerMultiprocessorHigh + sharedMemPerMultiprocessorLow) / 2;
991
+ break;
992
+ }
993
+ break;
994
+ case 5:
995
+ case 6:
996
+ // Maxwell and Pascal have dedicated shared memory.
997
+ //
998
+ bytes = sharedMemPerMultiprocessorHigh;
999
+ break;
1000
+ default:
1001
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
1002
+ }
1003
+
1004
+ *limit = bytes;
1005
+
1006
+ return CUDA_OCC_SUCCESS;
1007
+ }
1008
+
1009
+ /**
1010
+ * Shared memory based on config requested by User
1011
+ */
1012
+ static __OCC_INLINE cudaOccError cudaOccSMemPerMultiprocessor(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
1013
+ {
1014
+ // Volta introduces a new API that allows for shared memory carveout preference. Because it is a shared memory preference,
1015
+ // it is handled separately from the cache config preference.
1016
+ if (properties->computeMajor >= 7) {
1017
+ return cudaOccSMemPreferenceVoltaPlus(limit, properties, state);
1018
+ }
1019
+ return cudaOccSMemPreference(limit, properties, state);
1020
+ }
1021
+
1022
+ /**
1023
+ * Return the per block shared memory limit based on function config
1024
+ */
1025
+ static __OCC_INLINE cudaOccError cudaOccSMemPerBlock(size_t *limit, const cudaOccDeviceProp *properties, cudaOccFuncShmemConfig shmemLimitConfig, size_t smemPerCta)
1026
+ {
1027
+ switch (properties->computeMajor) {
1028
+ case 2:
1029
+ case 3:
1030
+ case 4:
1031
+ case 5:
1032
+ case 6:
1033
+ *limit = properties->sharedMemPerBlock;
1034
+ break;
1035
+ case 7:
1036
+ case 8:
1037
+ case 9:
1038
+ switch (shmemLimitConfig) {
1039
+ default:
1040
+ case FUNC_SHMEM_LIMIT_DEFAULT:
1041
+ *limit = properties->sharedMemPerBlock;
1042
+ break;
1043
+ case FUNC_SHMEM_LIMIT_OPTIN:
1044
+ if (smemPerCta > properties->sharedMemPerBlock) {
1045
+ *limit = properties->sharedMemPerBlockOptin;
1046
+ }
1047
+ else {
1048
+ *limit = properties->sharedMemPerBlock;
1049
+ }
1050
+ break;
1051
+ }
1052
+ break;
1053
+ default:
1054
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
1055
+ }
1056
+
1057
+ // Starting Ampere, CUDA driver reserves additional shared memory per block
1058
+ if (properties->computeMajor >= 8) {
1059
+ *limit += properties->reservedSharedMemPerBlock;
1060
+ }
1061
+
1062
+ return CUDA_OCC_SUCCESS;
1063
+ }
1064
+
1065
+ /**
1066
+ * Partitioned global caching mode support
1067
+ */
1068
+ static __OCC_INLINE cudaOccError cudaOccPartitionedGlobalCachingModeSupport(cudaOccPartitionedGCSupport *limit, const cudaOccDeviceProp *properties)
1069
+ {
1070
+ *limit = PARTITIONED_GC_NOT_SUPPORTED;
1071
+
1072
+ if ((properties->computeMajor == 5 && (properties->computeMinor == 2 || properties->computeMinor == 3)) ||
1073
+ properties->computeMajor == 6) {
1074
+ *limit = PARTITIONED_GC_SUPPORTED;
1075
+ }
1076
+
1077
+ if (properties->computeMajor == 6 && properties->computeMinor == 0) {
1078
+ *limit = PARTITIONED_GC_NOT_SUPPORTED;
1079
+ }
1080
+
1081
+ return CUDA_OCC_SUCCESS;
1082
+ }
1083
+
1084
+ ///////////////////////////////////////////////
1085
+ // User Input Sanity //
1086
+ ///////////////////////////////////////////////
1087
+
1088
+ static __OCC_INLINE cudaOccError cudaOccDevicePropCheck(const cudaOccDeviceProp *properties)
1089
+ {
1090
+ // Verify device properties
1091
+ //
1092
+ // Each of these limits must be a positive number.
1093
+ //
1094
+ // Compute capability is checked during the occupancy calculation
1095
+ //
1096
+ if (properties->maxThreadsPerBlock <= 0 ||
1097
+ properties->maxThreadsPerMultiprocessor <= 0 ||
1098
+ properties->regsPerBlock <= 0 ||
1099
+ properties->regsPerMultiprocessor <= 0 ||
1100
+ properties->warpSize <= 0 ||
1101
+ properties->sharedMemPerBlock <= 0 ||
1102
+ properties->sharedMemPerMultiprocessor <= 0 ||
1103
+ properties->numSms <= 0) {
1104
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1105
+ }
1106
+
1107
+ return CUDA_OCC_SUCCESS;
1108
+ }
1109
+
1110
+ static __OCC_INLINE cudaOccError cudaOccFuncAttributesCheck(const cudaOccFuncAttributes *attributes)
1111
+ {
1112
+ // Verify function attributes
1113
+ //
1114
+ if (attributes->maxThreadsPerBlock <= 0 ||
1115
+ attributes->numRegs < 0) { // Compiler may choose not to use
1116
+ // any register (empty kernels,
1117
+ // etc.)
1118
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1119
+ }
1120
+
1121
+ return CUDA_OCC_SUCCESS;
1122
+ }
1123
+
1124
+ static __OCC_INLINE cudaOccError cudaOccDeviceStateCheck(const cudaOccDeviceState *state)
1125
+ {
1126
+ (void)state; // silence unused-variable warning
1127
+ // Placeholder
1128
+ //
1129
+
1130
+ return CUDA_OCC_SUCCESS;
1131
+ }
1132
+
1133
+ static __OCC_INLINE cudaOccError cudaOccInputCheck(
1134
+ const cudaOccDeviceProp *properties,
1135
+ const cudaOccFuncAttributes *attributes,
1136
+ const cudaOccDeviceState *state)
1137
+ {
1138
+ cudaOccError status = CUDA_OCC_SUCCESS;
1139
+
1140
+ status = cudaOccDevicePropCheck(properties);
1141
+ if (status != CUDA_OCC_SUCCESS) {
1142
+ return status;
1143
+ }
1144
+
1145
+ status = cudaOccFuncAttributesCheck(attributes);
1146
+ if (status != CUDA_OCC_SUCCESS) {
1147
+ return status;
1148
+ }
1149
+
1150
+ status = cudaOccDeviceStateCheck(state);
1151
+ if (status != CUDA_OCC_SUCCESS) {
1152
+ return status;
1153
+ }
1154
+
1155
+ return status;
1156
+ }
1157
+
1158
+ ///////////////////////////////////////////////
1159
+ // Occupancy calculation Functions //
1160
+ ///////////////////////////////////////////////
1161
+
1162
+ static __OCC_INLINE cudaOccPartitionedGCConfig cudaOccPartitionedGCExpected(
1163
+ const cudaOccDeviceProp *properties,
1164
+ const cudaOccFuncAttributes *attributes)
1165
+ {
1166
+ cudaOccPartitionedGCSupport gcSupport;
1167
+ cudaOccPartitionedGCConfig gcConfig;
1168
+
1169
+ cudaOccPartitionedGlobalCachingModeSupport(&gcSupport, properties);
1170
+
1171
+ gcConfig = attributes->partitionedGCConfig;
1172
+
1173
+ if (gcSupport == PARTITIONED_GC_NOT_SUPPORTED) {
1174
+ gcConfig = PARTITIONED_GC_OFF;
1175
+ }
1176
+
1177
+ return gcConfig;
1178
+ }
1179
+
1180
+ // Warp limit
1181
+ //
1182
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMWarpsLimit(
1183
+ int *limit,
1184
+ cudaOccPartitionedGCConfig gcConfig,
1185
+ const cudaOccDeviceProp *properties,
1186
+ const cudaOccFuncAttributes *attributes,
1187
+ int blockSize)
1188
+ {
1189
+ cudaOccError status = CUDA_OCC_SUCCESS;
1190
+ int maxWarpsPerSm;
1191
+ int warpsAllocatedPerCTA;
1192
+ int maxBlocks;
1193
+ (void)attributes; // silence unused-variable warning
1194
+
1195
+ if (blockSize > properties->maxThreadsPerBlock) {
1196
+ maxBlocks = 0;
1197
+ }
1198
+ else {
1199
+ maxWarpsPerSm = properties->maxThreadsPerMultiprocessor / properties->warpSize;
1200
+ warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
1201
+ maxBlocks = 0;
1202
+
1203
+ if (gcConfig != PARTITIONED_GC_OFF) {
1204
+ int maxBlocksPerSmPartition;
1205
+ int maxWarpsPerSmPartition;
1206
+
1207
+ // If partitioned global caching is on, then a CTA can only use an SM
1208
+ // partition (a half SM), and thus a half of the warp slots
1209
+ // available per SM
1210
+ //
1211
+ maxWarpsPerSmPartition = maxWarpsPerSm / 2;
1212
+ maxBlocksPerSmPartition = maxWarpsPerSmPartition / warpsAllocatedPerCTA;
1213
+ maxBlocks = maxBlocksPerSmPartition * 2;
1214
+ }
1215
+ // On hardware that supports partitioned global caching, each half SM is
1216
+ // guaranteed to support at least 32 warps (maximum number of warps of a
1217
+ // CTA), so caching will not cause 0 occupancy due to insufficient warp
1218
+ // allocation slots.
1219
+ //
1220
+ else {
1221
+ maxBlocks = maxWarpsPerSm / warpsAllocatedPerCTA;
1222
+ }
1223
+ }
1224
+
1225
+ *limit = maxBlocks;
1226
+
1227
+ return status;
1228
+ }
1229
+
1230
+ // Shared memory limit
1231
+ //
1232
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMSmemLimit(
1233
+ int *limit,
1234
+ cudaOccResult *result,
1235
+ const cudaOccDeviceProp *properties,
1236
+ const cudaOccFuncAttributes *attributes,
1237
+ const cudaOccDeviceState *state,
1238
+ int blockSize,
1239
+ size_t dynamicSmemSize)
1240
+ {
1241
+ cudaOccError status = CUDA_OCC_SUCCESS;
1242
+ int allocationGranularity;
1243
+ size_t userSmemPreference = 0;
1244
+ size_t totalSmemUsagePerCTA;
1245
+ size_t maxSmemUsagePerCTA;
1246
+ size_t smemAllocatedPerCTA;
1247
+ size_t staticSmemSize;
1248
+ size_t sharedMemPerMultiprocessor;
1249
+ size_t smemLimitPerCTA;
1250
+ int maxBlocks;
1251
+ int dynamicSmemSizeExceeded = 0;
1252
+ int totalSmemSizeExceeded = 0;
1253
+ (void)blockSize; // silence unused-variable warning
1254
+
1255
+ status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
1256
+ if (status != CUDA_OCC_SUCCESS) {
1257
+ return status;
1258
+ }
1259
+
1260
+ // Obtain the user preferred shared memory size. This setting is ignored if
1261
+ // the user requests more shared memory than preferred.
1262
+ //
1263
+ status = cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
1264
+ if (status != CUDA_OCC_SUCCESS) {
1265
+ return status;
1266
+ }
1267
+
1268
+ staticSmemSize = attributes->sharedSizeBytes + properties->reservedSharedMemPerBlock;
1269
+ totalSmemUsagePerCTA = staticSmemSize + dynamicSmemSize;
1270
+ smemAllocatedPerCTA = __occRoundUp((int)totalSmemUsagePerCTA, (int)allocationGranularity);
1271
+
1272
+ maxSmemUsagePerCTA = staticSmemSize + attributes->maxDynamicSharedSizeBytes;
1273
+
1274
+ dynamicSmemSizeExceeded = 0;
1275
+ totalSmemSizeExceeded = 0;
1276
+
1277
+ // Obtain the user set maximum dynamic size if it exists
1278
+ // If so, the current launch dynamic shared memory must not
1279
+ // exceed the set limit
1280
+ if (attributes->shmemLimitConfig != FUNC_SHMEM_LIMIT_DEFAULT &&
1281
+ dynamicSmemSize > attributes->maxDynamicSharedSizeBytes) {
1282
+ dynamicSmemSizeExceeded = 1;
1283
+ }
1284
+
1285
+ status = cudaOccSMemPerBlock(&smemLimitPerCTA, properties, attributes->shmemLimitConfig, maxSmemUsagePerCTA);
1286
+ if (status != CUDA_OCC_SUCCESS) {
1287
+ return status;
1288
+ }
1289
+
1290
+ if (smemAllocatedPerCTA > smemLimitPerCTA) {
1291
+ totalSmemSizeExceeded = 1;
1292
+ }
1293
+
1294
+ if (dynamicSmemSizeExceeded || totalSmemSizeExceeded) {
1295
+ maxBlocks = 0;
1296
+ }
1297
+ else {
1298
+ // User requested shared memory limit is used as long as it is greater
1299
+ // than the total shared memory used per CTA, i.e. as long as at least
1300
+ // one CTA can be launched.
1301
+ if (userSmemPreference >= smemAllocatedPerCTA) {
1302
+ sharedMemPerMultiprocessor = userSmemPreference;
1303
+ }
1304
+ else {
1305
+ // On Volta+, user requested shared memory will limit occupancy
1306
+ // if it's less than shared memory per CTA. Otherwise, the
1307
+ // maximum shared memory limit is used.
1308
+ if (properties->computeMajor >= 7) {
1309
+ sharedMemPerMultiprocessor = smemAllocatedPerCTA;
1310
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&sharedMemPerMultiprocessor, properties);
1311
+ if (status != CUDA_OCC_SUCCESS) {
1312
+ return status;
1313
+ }
1314
+ }
1315
+ else {
1316
+ sharedMemPerMultiprocessor = properties->sharedMemPerMultiprocessor;
1317
+ }
1318
+ }
1319
+
1320
+ if (smemAllocatedPerCTA > 0) {
1321
+ maxBlocks = (int)(sharedMemPerMultiprocessor / smemAllocatedPerCTA);
1322
+ }
1323
+ else {
1324
+ maxBlocks = INT_MAX;
1325
+ }
1326
+ }
1327
+
1328
+ result->allocatedSharedMemPerBlock = smemAllocatedPerCTA;
1329
+
1330
+ *limit = maxBlocks;
1331
+
1332
+ return status;
1333
+ }
1334
+
1335
+ static __OCC_INLINE
1336
+ cudaOccError cudaOccMaxBlocksPerSMRegsLimit(
1337
+ int *limit,
1338
+ cudaOccPartitionedGCConfig *gcConfig,
1339
+ cudaOccResult *result,
1340
+ const cudaOccDeviceProp *properties,
1341
+ const cudaOccFuncAttributes *attributes,
1342
+ int blockSize)
1343
+ {
1344
+ cudaOccError status = CUDA_OCC_SUCCESS;
1345
+ int allocationGranularity;
1346
+ int warpsAllocatedPerCTA;
1347
+ int regsAllocatedPerCTA;
1348
+ int regsAssumedPerCTA;
1349
+ int regsPerWarp;
1350
+ int regsAllocatedPerWarp;
1351
+ int numSubPartitions;
1352
+ int numRegsPerSubPartition;
1353
+ int numWarpsPerSubPartition;
1354
+ int numWarpsPerSM;
1355
+ int maxBlocks;
1356
+ int maxRegsPerThread;
1357
+
1358
+ status = cudaOccRegAllocationGranularity(
1359
+ &allocationGranularity,
1360
+ properties);
1361
+ if (status != CUDA_OCC_SUCCESS) {
1362
+ return status;
1363
+ }
1364
+
1365
+ status = cudaOccRegAllocationMaxPerThread(
1366
+ &maxRegsPerThread,
1367
+ properties);
1368
+ if (status != CUDA_OCC_SUCCESS) {
1369
+ return status;
1370
+ }
1371
+
1372
+ status = cudaOccSubPartitionsPerMultiprocessor(&numSubPartitions, properties);
1373
+ if (status != CUDA_OCC_SUCCESS) {
1374
+ return status;
1375
+ }
1376
+
1377
+ warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
1378
+
1379
+ // GPUs of compute capability 2.x and higher allocate registers to warps
1380
+ //
1381
+ // Number of regs per warp is regs per thread x warp size, rounded up to
1382
+ // register allocation granularity
1383
+ //
1384
+ regsPerWarp = attributes->numRegs * properties->warpSize;
1385
+ regsAllocatedPerWarp = __occRoundUp(regsPerWarp, allocationGranularity);
1386
+ regsAllocatedPerCTA = regsAllocatedPerWarp * warpsAllocatedPerCTA;
1387
+
1388
+ // Hardware verifies if a launch fits the per-CTA register limit. For
1389
+ // historical reasons, the verification logic assumes register
1390
+ // allocations are made to all partitions simultaneously. Therefore, to
1391
+ // simulate the hardware check, the warp allocation needs to be rounded
1392
+ // up to the number of partitions.
1393
+ //
1394
+ regsAssumedPerCTA = regsAllocatedPerWarp * __occRoundUp(warpsAllocatedPerCTA, numSubPartitions);
1395
+
1396
+ if (properties->regsPerBlock < regsAssumedPerCTA || // Hardware check
1397
+ properties->regsPerBlock < regsAllocatedPerCTA || // Software check
1398
+ attributes->numRegs > maxRegsPerThread) { // Per thread limit check
1399
+ maxBlocks = 0;
1400
+ }
1401
+ else {
1402
+ if (regsAllocatedPerWarp > 0) {
1403
+ // Registers are allocated in each sub-partition. The max number
1404
+ // of warps that can fit on an SM is equal to the max number of
1405
+ // warps per sub-partition x number of sub-partitions.
1406
+ //
1407
+ numRegsPerSubPartition = properties->regsPerMultiprocessor / numSubPartitions;
1408
+ numWarpsPerSubPartition = numRegsPerSubPartition / regsAllocatedPerWarp;
1409
+
1410
+ maxBlocks = 0;
1411
+
1412
+ if (*gcConfig != PARTITIONED_GC_OFF) {
1413
+ int numSubPartitionsPerSmPartition;
1414
+ int numWarpsPerSmPartition;
1415
+ int maxBlocksPerSmPartition;
1416
+
1417
+ // If partitioned global caching is on, then a CTA can only
1418
+ // use half of an SM, and thus half of the registers available
1419
+ // per SM.
1420
+ //
1421
+ numSubPartitionsPerSmPartition = numSubPartitions / 2;
1422
+ numWarpsPerSmPartition = numWarpsPerSubPartition * numSubPartitionsPerSmPartition;
1423
+ maxBlocksPerSmPartition = numWarpsPerSmPartition / warpsAllocatedPerCTA;
1424
+ maxBlocks = maxBlocksPerSmPartition * 2;
1425
+ }
1426
+
1427
+ // Try again if partitioned global caching is not enabled, or if
1428
+ // the CTA cannot fit on the SM with caching on (maxBlocks == 0). In the latter
1429
+ // case, the device will automatically turn off caching, except
1430
+ // if the user forces enablement via PARTITIONED_GC_ON_STRICT to calculate
1431
+ // occupancy and launch configuration.
1432
+ //
1433
+ if (maxBlocks == 0 && *gcConfig != PARTITIONED_GC_ON_STRICT) {
1434
+ // In case *gcConfig was PARTITIONED_GC_ON flip it OFF since
1435
+ // this is what it will be if we spread CTA across partitions.
1436
+ //
1437
+ *gcConfig = PARTITIONED_GC_OFF;
1438
+ numWarpsPerSM = numWarpsPerSubPartition * numSubPartitions;
1439
+ maxBlocks = numWarpsPerSM / warpsAllocatedPerCTA;
1440
+ }
1441
+ }
1442
+ else {
1443
+ maxBlocks = INT_MAX;
1444
+ }
1445
+ }
1446
+
1447
+
1448
+ result->allocatedRegistersPerBlock = regsAllocatedPerCTA;
1449
+
1450
+ *limit = maxBlocks;
1451
+
1452
+ return status;
1453
+ }
1454
+
1455
+ // Barrier limit
1456
+ //
1457
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMBlockBarrierLimit(
1458
+ int *limit,
1459
+ int ctaLimitBlocks,
1460
+ const cudaOccFuncAttributes *attributes)
1461
+ {
1462
+ cudaOccError status = CUDA_OCC_SUCCESS;
1463
+ int numBarriersAvailable = ctaLimitBlocks * 2;
1464
+ int numBarriersUsed = attributes->numBlockBarriers;
1465
+ int maxBlocks = INT_MAX;
1466
+
1467
+ if (numBarriersUsed) {
1468
+ maxBlocks = numBarriersAvailable / numBarriersUsed;
1469
+ }
1470
+
1471
+ *limit = maxBlocks;
1472
+
1473
+ return status;
1474
+ }
1475
+
1476
+ ///////////////////////////////////
1477
+ // API Implementations //
1478
+ ///////////////////////////////////
1479
+
1480
+ static __OCC_INLINE
1481
+ cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
1482
+ cudaOccResult *result,
1483
+ const cudaOccDeviceProp *properties,
1484
+ const cudaOccFuncAttributes *attributes,
1485
+ const cudaOccDeviceState *state,
1486
+ int blockSize,
1487
+ size_t dynamicSmemSize)
1488
+ {
1489
+ cudaOccError status = CUDA_OCC_SUCCESS;
1490
+ int ctaLimitWarps = 0;
1491
+ int ctaLimitBlocks = 0;
1492
+ int ctaLimitSMem = 0;
1493
+ int ctaLimitRegs = 0;
1494
+ int ctaLimitBars = 0;
1495
+ int ctaLimit = 0;
1496
+ unsigned int limitingFactors = 0;
1497
+
1498
+ cudaOccPartitionedGCConfig gcConfig = PARTITIONED_GC_OFF;
1499
+
1500
+ if (!result || !properties || !attributes || !state || blockSize <= 0) {
1501
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1502
+ }
1503
+
1504
+ ///////////////////////////
1505
+ // Check user input
1506
+ ///////////////////////////
1507
+
1508
+ status = cudaOccInputCheck(properties, attributes, state);
1509
+ if (status != CUDA_OCC_SUCCESS) {
1510
+ return status;
1511
+ }
1512
+
1513
+ ///////////////////////////
1514
+ // Initialization
1515
+ ///////////////////////////
1516
+
1517
+ gcConfig = cudaOccPartitionedGCExpected(properties, attributes);
1518
+
1519
+ ///////////////////////////
1520
+ // Compute occupancy
1521
+ ///////////////////////////
1522
+
1523
+ // Limits due to registers/SM
1524
+ // Also compute if partitioned global caching has to be turned off
1525
+ //
1526
+ status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegs, &gcConfig, result, properties, attributes, blockSize);
1527
+ if (status != CUDA_OCC_SUCCESS) {
1528
+ return status;
1529
+ }
1530
+
1531
+ // SMs on GP100 (6.0) have 2 subpartitions, while those on GP10x have 4.
1532
+ // As a result, an SM on GP100 may be able to run more CTAs than one on GP10x.
1533
+ // For forward compatibility within the Pascal family, if a function cannot run on GP10x (maxBlocks == 0),
1534
+ // we do not let it run on any Pascal processor, even though it may be able to run on GP100.
1535
+ // Therefore, we also check the occupancy on GP10x when the function can run on GP100.
1536
+ //
1537
+ if (properties->computeMajor == 6 && properties->computeMinor == 0 && ctaLimitRegs) {
1538
+ cudaOccDeviceProp propertiesGP10x;
1539
+ cudaOccPartitionedGCConfig gcConfigGP10x = gcConfig;
1540
+ int ctaLimitRegsGP10x = 0;
1541
+
1542
+ // Set up properties for GP10x
1543
+ memcpy(&propertiesGP10x, properties, sizeof(propertiesGP10x));
1544
+ propertiesGP10x.computeMinor = 1;
1545
+
1546
+ status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegsGP10x, &gcConfigGP10x, result, &propertiesGP10x, attributes, blockSize);
1547
+ if (status != CUDA_OCC_SUCCESS) {
1548
+ return status;
1549
+ }
1550
+
1551
+ if (ctaLimitRegsGP10x == 0) {
1552
+ ctaLimitRegs = 0;
1553
+ }
1554
+ }
1555
+
1556
+ // Limits due to warps/SM
1557
+ //
1558
+ status = cudaOccMaxBlocksPerSMWarpsLimit(&ctaLimitWarps, gcConfig, properties, attributes, blockSize);
1559
+ if (status != CUDA_OCC_SUCCESS) {
1560
+ return status;
1561
+ }
1562
+
1563
+ // Limits due to blocks/SM
1564
+ //
1565
+ status = cudaOccMaxBlocksPerMultiprocessor(&ctaLimitBlocks, properties);
1566
+ if (status != CUDA_OCC_SUCCESS) {
1567
+ return status;
1568
+ }
1569
+
1570
+ // Limits due to shared memory/SM
1571
+ //
1572
+ status = cudaOccMaxBlocksPerSMSmemLimit(&ctaLimitSMem, result, properties, attributes, state, blockSize, dynamicSmemSize);
1573
+ if (status != CUDA_OCC_SUCCESS) {
1574
+ return status;
1575
+ }
1576
+
1577
+ ///////////////////////////
1578
+ // Overall occupancy
1579
+ ///////////////////////////
1580
+
1581
+ // Overall limit is min() of limits due to above reasons
1582
+ //
1583
+ ctaLimit = __occMin(ctaLimitRegs, __occMin(ctaLimitSMem, __occMin(ctaLimitWarps, ctaLimitBlocks)));
1584
+
1585
+ // Determine occupancy limiting factors
1586
+ //
1587
+ if (ctaLimit == ctaLimitWarps) {
1588
+ limitingFactors |= OCC_LIMIT_WARPS;
1589
+ }
1590
+ if (ctaLimit == ctaLimitRegs) {
1591
+ limitingFactors |= OCC_LIMIT_REGISTERS;
1592
+ }
1593
+ if (ctaLimit == ctaLimitSMem) {
1594
+ limitingFactors |= OCC_LIMIT_SHARED_MEMORY;
1595
+ }
1596
+ if (ctaLimit == ctaLimitBlocks) {
1597
+ limitingFactors |= OCC_LIMIT_BLOCKS;
1598
+ }
1599
+
1600
+ // For Hopper onwards, compute the limit on occupancy based on the block barrier count
1601
+ //
1602
+ if (properties->computeMajor >= 9 && attributes->numBlockBarriers > 0) {
1603
+ // Limits due to barrier/SM
1604
+ //
1605
+ status = cudaOccMaxBlocksPerSMBlockBarrierLimit(&ctaLimitBars, ctaLimitBlocks, attributes);
1606
+ if (status != CUDA_OCC_SUCCESS) {
1607
+ return status;
1608
+ }
1609
+
1610
+ // Recompute overall limit based on barrier/SM
1611
+ //
1612
+ ctaLimit = __occMin(ctaLimitBars, ctaLimit);
1613
+
1614
+ // Determine if this is the occupancy limiting factor
1615
+ //
1616
+ if (ctaLimit == ctaLimitBars) {
1617
+ limitingFactors |= OCC_LIMIT_BARRIERS;
1618
+ }
1619
+ }
1620
+ else {
1621
+ ctaLimitBars = INT_MAX;
1622
+ }
1623
+
1624
+ // Fill in the return values
1625
+ //
1626
+ result->limitingFactors = limitingFactors;
1627
+
1628
+ result->blockLimitRegs = ctaLimitRegs;
1629
+ result->blockLimitSharedMem = ctaLimitSMem;
1630
+ result->blockLimitWarps = ctaLimitWarps;
1631
+ result->blockLimitBlocks = ctaLimitBlocks;
1632
+ result->blockLimitBarriers = ctaLimitBars;
1633
+ result->partitionedGCConfig = gcConfig;
1634
+
1635
+ // Final occupancy
1636
+ result->activeBlocksPerMultiprocessor = ctaLimit;
1637
+
1638
+ return CUDA_OCC_SUCCESS;
1639
+ }
1640
+
1641
+ static __OCC_INLINE
1642
+ cudaOccError cudaOccAvailableDynamicSMemPerBlock(
1643
+ size_t *bytesAvailable,
1644
+ const cudaOccDeviceProp *properties,
1645
+ const cudaOccFuncAttributes *attributes,
1646
+ const cudaOccDeviceState *state,
1647
+ int numBlocks,
1648
+ int blockSize)
1649
+ {
1650
+ int allocationGranularity;
1651
+ size_t smemLimitPerBlock;
1652
+ size_t smemAvailableForDynamic;
1653
+ size_t userSmemPreference = 0;
1654
+ size_t sharedMemPerMultiprocessor;
1655
+ cudaOccResult result;
1656
+ cudaOccError status = CUDA_OCC_SUCCESS;
1657
+
1658
+ if (numBlocks <= 0)
1659
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1660
+
1661
+ // First compute occupancy of potential kernel launch.
1662
+ //
1663
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(&result, properties, attributes, state, blockSize, 0);
1664
+ if (status != CUDA_OCC_SUCCESS) {
1665
+ return status;
1666
+ }
1667
+ // Check if occupancy is achievable given user requested number of blocks.
1668
+ //
1669
+ if (result.activeBlocksPerMultiprocessor < numBlocks) {
1670
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1671
+ }
1672
+
1673
+ status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
1674
+ if (status != CUDA_OCC_SUCCESS) {
1675
+ return status;
1676
+ }
1677
+
1678
+ // Return the per block shared memory limit based on function config.
1679
+ //
1680
+ status = cudaOccSMemPerBlock(&smemLimitPerBlock, properties, attributes->shmemLimitConfig, properties->sharedMemPerMultiprocessor);
1681
+ if (status != CUDA_OCC_SUCCESS) {
1682
+ return status;
1683
+ }
1684
+
1685
+ // If only a single block is needed per SM, the user preference can be ignored and the full software
1686
+ // limit may be used as shared memory; otherwise, if more than one block is needed, the user
1687
+ // preference sets the total limit of available shared memory.
1688
+ //
1689
+ cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
1690
+ if (numBlocks == 1) {
1691
+ sharedMemPerMultiprocessor = smemLimitPerBlock;
1692
+ }
1693
+ else {
1694
+ if (!userSmemPreference) {
1695
+ userSmemPreference = 1;
1696
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&userSmemPreference, properties);
1697
+ if (status != CUDA_OCC_SUCCESS) {
1698
+ return status;
1699
+ }
1700
+ }
1701
+ sharedMemPerMultiprocessor = userSmemPreference;
1702
+ }
1703
+
1704
+ // Split the per-SM shared memory across the requested blocks, rounded down to the allocation granularity
1705
+ //
1706
+ smemAvailableForDynamic = sharedMemPerMultiprocessor / numBlocks;
1707
+ smemAvailableForDynamic = (smemAvailableForDynamic / allocationGranularity) * allocationGranularity;
1708
+
1709
+ // Cap shared memory
1710
+ //
1711
+ if (smemAvailableForDynamic > smemLimitPerBlock) {
1712
+ smemAvailableForDynamic = smemLimitPerBlock;
1713
+ }
1714
+
1715
+ // Now compute dynamic shared memory size
1716
+ smemAvailableForDynamic = smemAvailableForDynamic - attributes->sharedSizeBytes;
1717
+
1718
+ // Cap computed dynamic SM by user requested limit specified via cuFuncSetAttribute()
1719
+ //
1720
+ if (smemAvailableForDynamic > attributes->maxDynamicSharedSizeBytes)
1721
+ smemAvailableForDynamic = attributes->maxDynamicSharedSizeBytes;
1722
+
1723
+ *bytesAvailable = smemAvailableForDynamic;
1724
+ return CUDA_OCC_SUCCESS;
1725
+ }
1726
+
1727
+ static __OCC_INLINE
1728
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
1729
+ int *minGridSize,
1730
+ int *blockSize,
1731
+ const cudaOccDeviceProp *properties,
1732
+ const cudaOccFuncAttributes *attributes,
1733
+ const cudaOccDeviceState *state,
1734
+ size_t (*blockSizeToDynamicSMemSize)(int),
1735
+ size_t dynamicSMemSize)
1736
+ {
1737
+ cudaOccError status = CUDA_OCC_SUCCESS;
1738
+ cudaOccResult result;
1739
+
1740
+ // Limits
1741
+ int occupancyLimit;
1742
+ int granularity;
1743
+ int blockSizeLimit;
1744
+
1745
+ // Recorded maximum
1746
+ int maxBlockSize = 0;
1747
+ int numBlocks = 0;
1748
+ int maxOccupancy = 0;
1749
+
1750
+ // Temporary
1751
+ int blockSizeToTryAligned;
1752
+ int blockSizeToTry;
1753
+ int blockSizeLimitAligned;
1754
+ int occupancyInBlocks;
1755
+ int occupancyInThreads;
1756
+
1757
+ ///////////////////////////
1758
+ // Check user input
1759
+ ///////////////////////////
1760
+
1761
+ if (!minGridSize || !blockSize || !properties || !attributes || !state) {
1762
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1763
+ }
1764
+
1765
+ status = cudaOccInputCheck(properties, attributes, state);
1766
+ if (status != CUDA_OCC_SUCCESS) {
1767
+ return status;
1768
+ }
1769
+
1770
+ /////////////////////////////////////////////////////////////////////////////////
1771
+ // Try each block size, and pick the block size with maximum occupancy
1772
+ /////////////////////////////////////////////////////////////////////////////////
1773
+
1774
+ occupancyLimit = properties->maxThreadsPerMultiprocessor;
1775
+ granularity = properties->warpSize;
1776
+
1777
+ blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
1778
+ blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
1779
+
1780
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1781
+ blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
1782
+
1783
+ // Ignore dynamicSMemSize if the user provides a mapping
1784
+ //
1785
+ if (blockSizeToDynamicSMemSize) {
1786
+ dynamicSMemSize = (*blockSizeToDynamicSMemSize)(blockSizeToTry);
1787
+ }
1788
+
1789
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(
1790
+ &result,
1791
+ properties,
1792
+ attributes,
1793
+ state,
1794
+ blockSizeToTry,
1795
+ dynamicSMemSize);
1796
+
1797
+ if (status != CUDA_OCC_SUCCESS) {
1798
+ return status;
1799
+ }
1800
+
1801
+ occupancyInBlocks = result.activeBlocksPerMultiprocessor;
1802
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1803
+
1804
+ if (occupancyInThreads > maxOccupancy) {
1805
+ maxBlockSize = blockSizeToTry;
1806
+ numBlocks = occupancyInBlocks;
1807
+ maxOccupancy = occupancyInThreads;
1808
+ }
1809
+
1810
+ // Early out if we have reached the maximum
1811
+ //
1812
+ if (occupancyLimit == maxOccupancy) {
1813
+ break;
1814
+ }
1815
+ }
1816
+
1817
+ ///////////////////////////
1818
+ // Return best available
1819
+ ///////////////////////////
1820
+
1821
+ // Suggested min grid size to achieve a full machine launch
1822
+ //
1823
+ *minGridSize = numBlocks * properties->numSms;
1824
+ *blockSize = maxBlockSize;
1825
+
1826
+ return status;
1827
+ }
1828
+
1829
+
1830
+ #if defined(__cplusplus)
1831
+
1832
+ namespace {
1833
+
1834
+ __OCC_INLINE
1835
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
1836
+ int *minGridSize,
1837
+ int *blockSize,
1838
+ const cudaOccDeviceProp *properties,
1839
+ const cudaOccFuncAttributes *attributes,
1840
+ const cudaOccDeviceState *state,
1841
+ size_t dynamicSMemSize)
1842
+ {
1843
+ return cudaOccMaxPotentialOccupancyBlockSize(
1844
+ minGridSize,
1845
+ blockSize,
1846
+ properties,
1847
+ attributes,
1848
+ state,
1849
+ NULL,
1850
+ dynamicSMemSize);
1851
+ }
1852
+
1853
+ template <typename UnaryFunction>
1854
+ __OCC_INLINE
1855
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
1856
+ int *minGridSize,
1857
+ int *blockSize,
1858
+ const cudaOccDeviceProp *properties,
1859
+ const cudaOccFuncAttributes *attributes,
1860
+ const cudaOccDeviceState *state,
1861
+ UnaryFunction blockSizeToDynamicSMemSize)
1862
+ {
1863
+ cudaOccError status = CUDA_OCC_SUCCESS;
1864
+ cudaOccResult result;
1865
+
1866
+ // Limits
1867
+ int occupancyLimit;
1868
+ int granularity;
1869
+ int blockSizeLimit;
1870
+
1871
+ // Recorded maximum
1872
+ int maxBlockSize = 0;
1873
+ int numBlocks = 0;
1874
+ int maxOccupancy = 0;
1875
+
1876
+ // Temporary
1877
+ int blockSizeToTryAligned;
1878
+ int blockSizeToTry;
1879
+ int blockSizeLimitAligned;
1880
+ int occupancyInBlocks;
1881
+ int occupancyInThreads;
1882
+ size_t dynamicSMemSize;
1883
+
1884
+ ///////////////////////////
1885
+ // Check user input
1886
+ ///////////////////////////
1887
+
1888
+ if (!minGridSize || !blockSize || !properties || !attributes || !state) {
1889
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1890
+ }
1891
+
1892
+ status = cudaOccInputCheck(properties, attributes, state);
1893
+ if (status != CUDA_OCC_SUCCESS) {
1894
+ return status;
1895
+ }
1896
+
1897
+ /////////////////////////////////////////////////////////////////////////////////
1898
+ // Try each block size, and pick the block size with maximum occupancy
1899
+ /////////////////////////////////////////////////////////////////////////////////
1900
+
1901
+ occupancyLimit = properties->maxThreadsPerMultiprocessor;
1902
+ granularity = properties->warpSize;
1903
+ blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
1904
+ blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
1905
+
1906
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1907
+ blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
1908
+
1909
+ dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry);
1910
+
1911
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(
1912
+ &result,
1913
+ properties,
1914
+ attributes,
1915
+ state,
1916
+ blockSizeToTry,
1917
+ dynamicSMemSize);
1918
+
1919
+ if (status != CUDA_OCC_SUCCESS) {
1920
+ return status;
1921
+ }
1922
+
1923
+ occupancyInBlocks = result.activeBlocksPerMultiprocessor;
1924
+
1925
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1926
+
1927
+ if (occupancyInThreads > maxOccupancy) {
1928
+ maxBlockSize = blockSizeToTry;
1929
+ numBlocks = occupancyInBlocks;
1930
+ maxOccupancy = occupancyInThreads;
1931
+ }
1932
+
1933
+ // Early out if we have reached the maximum
1934
+ //
1935
+ if (occupancyLimit == maxOccupancy) {
1936
+ break;
1937
+ }
1938
+ }
1939
+
1940
+ ///////////////////////////
1941
+ // Return best available
1942
+ ///////////////////////////
1943
+
1944
+ // Suggested min grid size to achieve a full machine launch
1945
+ //
1946
+ *minGridSize = numBlocks * properties->numSms;
1947
+ *blockSize = maxBlockSize;
1948
+
1949
+ return status;
1950
+ }
1951
+
1952
+ } // namespace anonymous
1953
+
1954
+ #endif /*__cplusplus */
1955
+
1956
+ #undef __OCC_INLINE
1957
+
1958
+ #endif /*__cuda_occupancy_h__*/
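
A usage sketch for the occupancy API above (not part of the NVIDIA header). The calculator is designed to be usable without the CUDA runtime, so the input structs can be filled in by hand; in real code the device fields would typically come from cudaGetDeviceProperties() or the driver API, and the function fields from cudaFuncGetAttributes(). Field names follow the cudaOccDeviceProp / cudaOccFuncAttributes definitions earlier in this header; the numeric values are illustrative approximations of a compute capability 8.0 device, not exact specifications.

#include <stdio.h>
#include <string.h>
#include "cuda_occupancy.h"

int main(void)
{
    cudaOccDeviceProp prop;
    cudaOccFuncAttributes attr;
    cudaOccDeviceState state;
    cudaOccResult result;
    memset(&prop,  0, sizeof(prop));
    memset(&attr,  0, sizeof(attr));
    memset(&state, 0, sizeof(state));   /* default cache / carveout configuration */

    /* Device limits (illustrative values for a compute 8.0 class part). */
    prop.computeMajor = 8;
    prop.computeMinor = 0;
    prop.warpSize = 32;
    prop.maxThreadsPerBlock = 1024;
    prop.maxThreadsPerMultiprocessor = 2048;
    prop.regsPerBlock = 65536;
    prop.regsPerMultiprocessor = 65536;
    prop.sharedMemPerBlock = 48 * 1024;
    prop.sharedMemPerBlockOptin = 160 * 1024;
    prop.sharedMemPerMultiprocessor = 164 * 1024;
    prop.numSms = 108;

    /* Kernel attributes (normally reported by the compiler/runtime for a kernel). */
    attr.maxThreadsPerBlock = 1024;
    attr.numRegs = 32;
    attr.sharedSizeBytes = 0;                       /* static shared memory */
    attr.maxDynamicSharedSizeBytes = 48 * 1024;
    attr.shmemLimitConfig = FUNC_SHMEM_LIMIT_DEFAULT;

    /* Occupancy of a 256-thread block that uses 4 KB of dynamic shared memory. */
    if (cudaOccMaxActiveBlocksPerMultiprocessor(&result, &prop, &attr, &state,
                                                256, 4096) == CUDA_OCC_SUCCESS) {
        printf("active blocks per SM: %d (limiting factor mask: 0x%x)\n",
               result.activeBlocksPerMultiprocessor, result.limitingFactors);
    }

    /* Block size that maximizes occupancy when no dynamic shared memory is used. */
    int minGridSize = 0, blockSize = 0;
    if (cudaOccMaxPotentialOccupancyBlockSize(&minGridSize, &blockSize, &prop, &attr,
                                              &state, NULL, 0) == CUDA_OCC_SUCCESS) {
        printf("suggested block size: %d, minimum grid size: %d\n",
               blockSize, minGridSize);
    }
    return 0;
}
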
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_surface_types.h ADDED
@@ -0,0 +1,76 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_SURFACE_TYPES_H__)
51
+ #define __CUDA_SURFACE_TYPES_H__
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #if !defined(__CUDACC_RTC__)
62
+ #define EXCLUDE_FROM_RTC
63
+ #include "channel_descriptor.h"
64
+ #undef EXCLUDE_FROM_RTC
65
+ #endif /* !__CUDACC_RTC__ */
66
+ #include "cuda_runtime_api.h"
67
+
68
+ /*******************************************************************************
69
+ * *
70
+ * *
71
+ * *
72
+ *******************************************************************************/
73
+
74
+ #endif /* __cplusplus && __CUDACC__ */
75
+
76
+ #endif /* !__CUDA_SURFACE_TYPES_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_texture_types.h ADDED
@@ -0,0 +1,76 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_TEXTURE_TYPES_H__)
51
+ #define __CUDA_TEXTURE_TYPES_H__
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ /*******************************************************************************
56
+ * *
57
+ * *
58
+ * *
59
+ *******************************************************************************/
60
+
61
+ #if !defined(__CUDACC_RTC__)
62
+ #define EXCLUDE_FROM_RTC
63
+ #include "channel_descriptor.h"
64
+ #undef EXCLUDE_FROM_RTC
65
+ #endif /* !__CUDACC_RTC__ */
66
+ #include "cuda_runtime_api.h"
67
+
68
+ /*******************************************************************************
69
+ * *
70
+ * *
71
+ * *
72
+ *******************************************************************************/
73
+
74
+ #endif /* __cplusplus && __CUDACC__ */
75
+
76
+ #endif /* !__CUDA_TEXTURE_TYPES_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h ADDED
@@ -0,0 +1,201 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_VDPAU_INTEROP_H__)
51
+ #define __CUDA_VDPAU_INTEROP_H__
52
+
53
+ #include "cuda_runtime_api.h"
54
+
55
+ #include <vdpau/vdpau.h>
56
+
57
+ #if defined(__cplusplus)
58
+ extern "C" {
59
+ #endif /* __cplusplus */
60
+
61
+ /**
62
+ * \addtogroup CUDART_VDPAU VDPAU Interoperability
63
+ * This section describes the VDPAU interoperability functions of the CUDA
64
+ * runtime application programming interface.
65
+ *
66
+ * @{
67
+ */
68
+
69
+ /**
70
+ * \brief Gets the CUDA device associated with a VdpDevice.
71
+ *
72
+ * Returns the CUDA device associated with a VdpDevice, if applicable.
73
+ *
74
+ * \param device - Returns the device associated with vdpDevice, or -1 if
75
+ * the device associated with vdpDevice is not a compute device.
76
+ * \param vdpDevice - A VdpDevice handle
77
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
78
+ *
79
+ * \return
80
+ * ::cudaSuccess
81
+ * \notefnerr
82
+ *
83
+ * \sa
84
+ * ::cudaVDPAUSetVDPAUDevice,
85
+ * ::cuVDPAUGetDevice
86
+ */
87
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUGetDevice(int *device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
88
+
89
+ /**
90
+ * \brief Sets a CUDA device to use VDPAU interoperability
91
+ *
92
+ * Records \p vdpDevice as the VdpDevice for VDPAU interoperability
93
+ * with the CUDA device \p device and sets \p device as the current
94
+ * device for the calling host thread.
95
+ *
96
+ * This function will immediately initialize the primary context on
97
+ * \p device if needed.
98
+ *
99
+ * If \p device has already been initialized then this call will fail
100
+ * with the error ::cudaErrorSetOnActiveProcess. In this case it is
101
+ * necessary to reset \p device using ::cudaDeviceReset() before
102
+ * VDPAU interoperability on \p device may be enabled.
103
+ *
104
+ * \param device - Device to use for VDPAU interoperability
105
+ * \param vdpDevice - The VdpDevice to interoperate with
106
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
107
+ *
108
+ * \return
109
+ * ::cudaSuccess,
110
+ * ::cudaErrorInvalidDevice,
111
+ * ::cudaErrorSetOnActiveProcess
112
+ * \notefnerr
113
+ *
114
+ * \sa ::cudaGraphicsVDPAURegisterVideoSurface,
115
+ * ::cudaGraphicsVDPAURegisterOutputSurface,
116
+ * ::cudaDeviceReset
117
+ */
118
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
119
+
120
+ /**
121
+ * \brief Register a VdpVideoSurface object
122
+ *
123
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by CUDA.
124
+ * A handle to the registered object is returned as \p resource.
125
+ * The surface's intended usage is specified using \p flags, as follows:
126
+ *
127
+ * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
128
+ * resource will be used. It is therefore assumed that this resource will be
129
+ * read from and written to by CUDA. This is the default value.
130
+ * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
131
+ * will not write to this resource.
132
+ * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
133
+ * CUDA will not read from this resource and will write over the
134
+ * entire contents of the resource, so none of the data previously
135
+ * stored in the resource will be preserved.
136
+ *
137
+ * \param resource - Pointer to the returned object handle
138
+ * \param vdpSurface - VDPAU object to be registered
139
+ * \param flags - Map flags
140
+ *
141
+ * \return
142
+ * ::cudaSuccess,
143
+ * ::cudaErrorInvalidDevice,
144
+ * ::cudaErrorInvalidValue,
145
+ * ::cudaErrorInvalidResourceHandle,
146
+ * ::cudaErrorUnknown
147
+ * \notefnerr
148
+ *
149
+ * \sa
150
+ * ::cudaVDPAUSetVDPAUDevice,
151
+ * ::cudaGraphicsUnregisterResource,
152
+ * ::cudaGraphicsSubResourceGetMappedArray,
153
+ * ::cuGraphicsVDPAURegisterVideoSurface
154
+ */
155
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterVideoSurface(struct cudaGraphicsResource **resource, VdpVideoSurface vdpSurface, unsigned int flags);
156
+
157
+ /**
158
+ * \brief Register a VdpOutputSurface object
159
+ *
160
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by CUDA.
161
+ * A handle to the registered object is returned as \p resource.
162
+ * The surface's intended usage is specified using \p flags, as follows:
163
+ *
164
+ * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
165
+ * resource will be used. It is therefore assumed that this resource will be
166
+ * read from and written to by CUDA. This is the default value.
167
+ * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
168
+ * will not write to this resource.
169
+ * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
170
+ * CUDA will not read from this resource and will write over the
171
+ * entire contents of the resource, so none of the data previously
172
+ * stored in the resource will be preserved.
173
+ *
174
+ * \param resource - Pointer to the returned object handle
175
+ * \param vdpSurface - VDPAU object to be registered
176
+ * \param flags - Map flags
177
+ *
178
+ * \return
179
+ * ::cudaSuccess,
180
+ * ::cudaErrorInvalidDevice,
181
+ * ::cudaErrorInvalidValue,
182
+ * ::cudaErrorInvalidResourceHandle,
183
+ * ::cudaErrorUnknown
184
+ * \notefnerr
185
+ *
186
+ * \sa
187
+ * ::cudaVDPAUSetVDPAUDevice,
188
+ * ::cudaGraphicsUnregisterResource,
189
+ * ::cudaGraphicsSubResourceGetMappedArray,
190
+ * ::cuGraphicsVDPAURegisterOutputSurface
191
+ */
192
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterOutputSurface(struct cudaGraphicsResource **resource, VdpOutputSurface vdpSurface, unsigned int flags);
193
+
194
+ /** @} */ /* END CUDART_VDPAU */
195
+
196
+ #if defined(__cplusplus)
197
+ }
198
+ #endif /* __cplusplus */
199
+
200
+ #endif /* __CUDA_VDPAU_INTEROP_H__ */
201
+
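
A hedged usage sketch for the interop entry points above (not part of the NVIDIA header). The VdpDevice, VdpGetProcAddress and VdpOutputSurface handles are assumed to have been created elsewhere with the VDPAU API; the helper name is invented for illustration.

#include <cuda_runtime.h>
#include <cuda_vdpau_interop.h>

cudaError_t readOutputSurface(VdpDevice vdpDevice,
                              VdpGetProcAddress *vdpGetProcAddress,
                              VdpOutputSurface surface,
                              cudaArray_t *arrayOut)
{
    int device = -1;
    cudaError_t err = cudaVDPAUGetDevice(&device, vdpDevice, vdpGetProcAddress);
    if (err != cudaSuccess) {
        return err;
    }
    if (device < 0) {
        return cudaErrorInvalidDevice;  /* vdpDevice is not backed by a compute device */
    }

    /* Enable VDPAU interoperability on the device; per the notes above this fails
       with cudaErrorSetOnActiveProcess if the device was already initialized. */
    err = cudaVDPAUSetVDPAUDevice(device, vdpDevice, vdpGetProcAddress);
    if (err != cudaSuccess) {
        return err;
    }

    cudaGraphicsResource_t resource = NULL;
    err = cudaGraphicsVDPAURegisterOutputSurface(&resource, surface,
                                                 cudaGraphicsMapFlagsReadOnly);
    if (err != cudaSuccess) {
        return err;
    }

    /* Map the surface and fetch the backing CUDA array (sub-resource 0, mip 0). */
    err = cudaGraphicsMapResources(1, &resource, 0);
    if (err == cudaSuccess) {
        err = cudaGraphicsSubResourceGetMappedArray(arrayOut, resource, 0, 0);
        cudaGraphicsUnmapResources(1, &resource, 0);
    }
    cudaGraphicsUnregisterResource(resource);
    return err;
}
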
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h ADDED
@@ -0,0 +1,57 @@
1
+ /*
2
+ * Copyright 2016 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef __CUDART_PLATFORM_H__
51
+ #define __CUDART_PLATFORM_H__
52
+
53
+ #if ((defined(__linux__) || defined(__QNX__)) && (defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)))
54
+ #define isEglSupported 1
55
+ #endif
56
+
57
+ #endif
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.h ADDED
@@ -0,0 +1,217 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__DEVICE_ATOMIC_FUNCTIONS_H__)
51
+ #define __DEVICE_ATOMIC_FUNCTIONS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #elif defined(_NVHPC_CUDA)
56
+ # define __DEVICE_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__
57
+ #else /* __CUDACC_RTC__ */
58
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
59
+ #endif /* __CUDACC_RTC__ */
60
+
61
+ #if defined(__cplusplus) && defined(__CUDACC__)
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ /* Add !defined(_NVHPC_CUDA) to avoid empty function definition in PGI CUDA
72
+ * C++ compiler where the macro __CUDA_ARCH__ is not defined. */
73
+ #if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
74
+ #define __DEF_IF_HOST { }
75
+ #else /* !__CUDA_ARCH__ */
76
+ #define __DEF_IF_HOST ;
77
+ #endif /* __CUDA_ARCH__ */
78
+
79
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
80
+ extern "C"
81
+ {
82
+ extern __device__ __device_builtin__ int __iAtomicAdd(int *address, int val);
83
+ extern __device__ __device_builtin__ unsigned int __uAtomicAdd(unsigned int *address, unsigned int val);
84
+ extern __device__ __device_builtin__ int __iAtomicExch(int *address, int val);
85
+ extern __device__ __device_builtin__ unsigned int __uAtomicExch(unsigned int *address, unsigned int val);
86
+ extern __device__ __device_builtin__ float __fAtomicExch(float *address, float val);
87
+ extern __device__ __device_builtin__ int __iAtomicMin(int *address, int val);
88
+ extern __device__ __device_builtin__ unsigned int __uAtomicMin(unsigned int *address, unsigned int val);
89
+ extern __device__ __device_builtin__ int __iAtomicMax(int *address, int val);
90
+ extern __device__ __device_builtin__ unsigned int __uAtomicMax(unsigned int *address, unsigned int val);
91
+ extern __device__ __device_builtin__ unsigned int __uAtomicInc(unsigned int *address, unsigned int val);
92
+ extern __device__ __device_builtin__ unsigned int __uAtomicDec(unsigned int *address, unsigned int val);
93
+ extern __device__ __device_builtin__ int __iAtomicAnd(int *address, int val);
94
+ extern __device__ __device_builtin__ unsigned int __uAtomicAnd(unsigned int *address, unsigned int val);
95
+ extern __device__ __device_builtin__ int __iAtomicOr(int *address, int val);
96
+ extern __device__ __device_builtin__ unsigned int __uAtomicOr(unsigned int *address, unsigned int val);
97
+ extern __device__ __device_builtin__ int __iAtomicXor(int *address, int val);
98
+ extern __device__ __device_builtin__ unsigned int __uAtomicXor(unsigned int *address, unsigned int val);
99
+ extern __device__ __device_builtin__ int __iAtomicCAS(int *address, int compare, int val);
100
+ extern __device__ __device_builtin__ unsigned int __uAtomicCAS(unsigned int *address, unsigned int compare, unsigned int val);
101
+ }
102
+ #endif /* __CUDA_ARCH__ || defined(_NVHPC_CUDA) */
103
+
104
+ /*******************************************************************************
105
+ * *
106
+ * *
107
+ * *
108
+ *******************************************************************************/
109
+
110
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val) __DEF_IF_HOST
111
+
112
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val) __DEF_IF_HOST
113
+
114
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val) __DEF_IF_HOST
115
+
116
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val) __DEF_IF_HOST
117
+
118
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val) __DEF_IF_HOST
119
+
120
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val) __DEF_IF_HOST
121
+
122
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val) __DEF_IF_HOST
123
+
124
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val) __DEF_IF_HOST
125
+
126
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val) __DEF_IF_HOST
127
+
128
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val) __DEF_IF_HOST
129
+
130
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val) __DEF_IF_HOST
131
+
132
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val) __DEF_IF_HOST
133
+
134
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val) __DEF_IF_HOST
135
+
136
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val) __DEF_IF_HOST
137
+
138
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val) __DEF_IF_HOST
139
+
140
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val) __DEF_IF_HOST
141
+
142
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val) __DEF_IF_HOST
143
+
144
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val) __DEF_IF_HOST
145
+
146
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val) __DEF_IF_HOST
147
+
148
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val) __DEF_IF_HOST
149
+
150
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val) __DEF_IF_HOST
151
+
152
+ /*******************************************************************************
153
+ * *
154
+ * *
155
+ * *
156
+ *******************************************************************************/
157
+
158
+ #include "cuda_runtime_api.h"
159
+
160
+ #if defined(_WIN32)
161
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
162
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
163
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
164
+ #else
165
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
166
+ #endif
167
+
168
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
169
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on compute_70 and above, and should be replaced with "#x"_sync()."\
170
+ "To continue using "#x"(), specify virtual architecture compute_60 when targeting sm_70 and above, for example, using the pair of compiler options: -arch=compute_60 -code=sm_70."
171
+ #elif defined(_NVHPC_CUDA)
172
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()."
173
+ #else
174
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)."
175
+ #endif
176
+
177
+ extern "C"
178
+ {
179
+ #if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
180
+ extern __device__ __device_builtin__ unsigned long long int __ullAtomicAdd(unsigned long long int *address, unsigned long long int val);
181
+ extern __device__ __device_builtin__ unsigned long long int __ullAtomicExch(unsigned long long int *address, unsigned long long int val);
182
+ extern __device__ __device_builtin__ unsigned long long int __ullAtomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val);
183
+ #endif /* __CUDA_ARCH__ || _NVHPC_CUDA */
184
+ extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) int __any(int cond);
185
+ extern __device__ __device_builtin__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) int __all(int cond);
186
+ }
187
+
188
+
189
+ /*******************************************************************************
190
+ * *
191
+ * *
192
+ * *
193
+ *******************************************************************************/
194
+
195
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST
196
+
197
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val) __DEF_IF_HOST
198
+
199
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val) __DEF_IF_HOST
200
+
201
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__any)) bool any(bool cond) __DEF_IF_HOST
202
+
203
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__all)) bool all(bool cond) __DEF_IF_HOST
204
+
205
+ #undef __DEPRECATED__
206
+ #undef __WSB_DEPRECATION_MESSAGE
207
+
208
+ #endif /* __cplusplus && __CUDACC__ */
209
+
210
+ #undef __DEF_IF_HOST
211
+ #undef __DEVICE_ATOMIC_FUNCTIONS_DECL__
212
+
213
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
214
+ #include "device_atomic_functions.hpp"
215
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
216
+
217
+ #endif /* !__DEVICE_ATOMIC_FUNCTIONS_H__ */
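
A short usage sketch for the atomic overloads declared above (not part of the NVIDIA header). histogram256 and atomicMaxFloat are illustrative names; the compare-and-swap loop is the usual way to synthesize an atomic operation (here a float max) that has no dedicated overload in this header. Compile with nvcc.

#include <cuda_runtime.h>

/* Build an atomic float max out of atomicCAS() by reinterpreting the bits. */
__device__ float atomicMaxFloat(float *address, float val)
{
    int *addressAsInt = (int *)address;
    int old = *addressAsInt;
    int assumed;
    do {
        assumed = old;
        old = atomicCAS(addressAsInt, assumed,
                        __float_as_int(fmaxf(val, __int_as_float(assumed))));
    } while (assumed != old);        /* retry if another thread intervened */
    return __int_as_float(old);
}

/* Many threads may hit the same bin, so each increment must be atomic. */
__global__ void histogram256(const unsigned char *data, size_t n, unsigned int *bins)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < n) {
        atomicAdd(&bins[data[i]], 1u);
    }
}
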
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp ADDED
@@ -0,0 +1,224 @@
+ /*
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__DEVICE_ATOMIC_FUNCTIONS_HPP__)
+ #define __DEVICE_ATOMIC_FUNCTIONS_HPP__
+
+ #if defined(__CUDACC_RTC__)
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__
+ #else /* __CUDACC_RTC__ */
+ #define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
+ #endif /* __CUDACC_RTC__ */
+
+ #if defined(__cplusplus) && defined(__CUDACC__)
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val)
+ {
+ return __iAtomicAdd(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAdd(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val)
+ {
+ return __iAtomicAdd(address, (unsigned int)-(int)val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAdd(address, (unsigned int)-(int)val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val)
+ {
+ return __iAtomicExch(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicExch(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val)
+ {
+ return __fAtomicExch(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val)
+ {
+ return __iAtomicMin(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicMin(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val)
+ {
+ return __iAtomicMax(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicMax(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicInc(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicDec(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val)
+ {
+ return __iAtomicAnd(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAnd(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val)
+ {
+ return __iAtomicOr(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicOr(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val)
+ {
+ return __iAtomicXor(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicXor(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val)
+ {
+ return __iAtomicCAS(address, compare, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val)
+ {
+ return __uAtomicCAS(address, compare, val);
+ }
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val)
+ {
+ return __ullAtomicAdd(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val)
+ {
+ return __ullAtomicExch(address, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val)
+ {
+ return __ullAtomicCAS(address, compare, val);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ bool any(bool cond)
+ {
+ return (bool)__any((int)cond);
+ }
+
+ __DEVICE_ATOMIC_FUNCTIONS_DECL__ bool all(bool cond)
+ {
+ return (bool)__all((int)cond);
+ }
+
+ #endif /* __cplusplus && __CUDACC__ */
+
+ #undef __DEVICE_ATOMIC_FUNCTIONS_DECL__
+
+ #endif /* !__DEVICE_ATOMIC_FUNCTIONS_HPP__ */
+
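The device_atomic_functions.hpp file added above supplies the 32-bit and 64-bit integer overloads of atomicAdd, atomicCAS, atomicInc, and related functions. A minimal usage sketch follows; the kernel names, the 256-bin byte histogram, and the ring-buffer setup are illustrative assumptions, not part of the header.

// Illustrative only: exercises atomicAdd(unsigned int*, unsigned int) and
// atomicInc(unsigned int*, unsigned int) as declared in the header above.
__global__ void histogram256(const unsigned char *in, unsigned int n, unsigned int *bins)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicAdd(&bins[in[i]], 1u);   // contended read-modify-write, resolved atomically
    }
}

__global__ void ring_append(unsigned int *head, unsigned int ring_size, unsigned int *slots)
{
    // atomicInc returns the old value and wraps back to 0 once it reaches
    // ring_size - 1, so each thread claims the next slot in [0, ring_size),
    // wrapping around when the ring is full.
    unsigned int slot = atomicInc(head, ring_size - 1u);
    slots[slot] = threadIdx.x;
}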
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_double_functions.h ADDED
@@ -0,0 +1,65 @@
+ /*
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "device_double_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__
+ #endif
+
+ #include "crt/device_double_functions.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H_WRAPPER__
+ #endif
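device_double_functions.h above is only a compatibility shim: it warns, forwards to crt/device_double_functions.h, and directs user code to include cuda_runtime.h or cuda_runtime_api.h instead. As a hedged illustration of what the forwarded intrinsics and the 64-bit atomicCAS from device_atomic_functions.hpp can do together, here is the well-known CAS-loop emulation of atomicAdd for double; the wrapper name atomicAddDouble is ours, and on sm_60 and newer a native atomicAdd(double*, double) overload makes it unnecessary.

#include <cuda_runtime.h>   // the supported include path; never the internal header directly

// Sketch only: atomicAddDouble is an illustrative name, not declared by these headers.
__device__ double atomicAddDouble(double *address, double val)
{
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull;
    unsigned long long int assumed;
    do {
        assumed = old;
        // Reinterpret the bits, add in double precision, then try to publish the result.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);   // another thread updated the value first: retry
    return __longlong_as_double(old);
}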