applied-ai-018 committed
Commit da80b95 · verified · 1 Parent(s): 5477c76

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +3 -0
  2. llmeval-env/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so +3 -0
  4. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__init__.py +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__init__.py +0 -0
  7. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__init__.py +0 -0
  9. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 +3 -0
  10. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py +0 -0
  11. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h +1690 -0
  12. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h +348 -0
  13. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h +0 -0
  14. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h +109 -0
  15. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp +1546 -0
  16. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h +224 -0
  17. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h +373 -0
  18. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h +148 -0
  19. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h +2300 -0
  20. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h +118 -0
  21. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp +85 -0
  22. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp +221 -0
  23. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h +221 -0
  24. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp +604 -0
  25. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp +588 -0
  26. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h +543 -0
  27. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp +527 -0
  28. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h +177 -0
  29. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h +175 -0
  30. llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp +316 -0
  31. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__init__.py +0 -0
  32. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__init__.py +0 -0
  34. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h +78 -0
  36. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h +658 -0
  37. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h +658 -0
  38. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h +540 -0
  39. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h +540 -0
  40. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h +608 -0
  41. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h +608 -0
  42. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h +571 -0
  43. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h +571 -0
  44. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h +219 -0
  45. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h +219 -0
  46. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h +1183 -0
  47. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h +1183 -0
  48. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h +501 -0
  49. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h +501 -0
  50. llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h +78 -0
.gitattributes CHANGED
@@ -95,3 +95,6 @@ llmeval-env/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.
95
  llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
96
  llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
97
  llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
98
+ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
99
+ llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text
100
+ llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95cec42ae770c1f2251d204b03e12d56fdb2e5561e4898c07b40382fe2474589
3
+ size 28636664
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c5639ce397a9f5b82cd277432d146370674358334a4ce0d33fa9a5ca090ac8a
3
+ size 6842248
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h ADDED
@@ -0,0 +1,1690 @@
1
+ /*
2
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _COOPERATIVE_GROUPS_H_
51
+ #define _COOPERATIVE_GROUPS_H_
52
+
53
+ #if defined(__cplusplus) && defined(__CUDACC__)
54
+
55
+ #include "cooperative_groups/details/info.h"
56
+ #include "cooperative_groups/details/driver_abi.h"
57
+ #include "cooperative_groups/details/helpers.h"
58
+ #include "cooperative_groups/details/memory.h"
59
+
60
+ #if defined(_CG_HAS_STL_ATOMICS)
61
+ #include <cuda/atomic>
62
+ #define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
63
+ #else
64
+ #define _CG_THREAD_SCOPE(scope)
65
+ #endif
66
+
67
+ _CG_BEGIN_NAMESPACE
68
+
69
+ namespace details {
70
+ _CG_CONST_DECL unsigned int coalesced_group_id = 1;
71
+ _CG_CONST_DECL unsigned int multi_grid_group_id = 2;
72
+ _CG_CONST_DECL unsigned int grid_group_id = 3;
73
+ _CG_CONST_DECL unsigned int thread_block_id = 4;
74
+ _CG_CONST_DECL unsigned int multi_tile_group_id = 5;
75
+ _CG_CONST_DECL unsigned int cluster_group_id = 6;
76
+ }
77
+
78
+ /**
79
+ * class thread_group;
80
+ *
81
+ * Generic thread group type, into which all groups are convertible.
82
+ * It acts as a container for all storage necessary for the derived groups,
83
+ * and will dispatch the API calls to the correct derived group. This means
84
+ * that all derived groups must implement the same interface as thread_group.
85
+ */
86
+ class thread_group
87
+ {
88
+ protected:
89
+ struct group_data {
90
+ unsigned int _unused : 1;
91
+ unsigned int type : 7, : 0;
92
+ };
93
+
94
+ struct gg_data {
95
+ details::grid_workspace *gridWs;
96
+ };
97
+
98
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
99
+ struct mg_data {
100
+ unsigned long long _unused : 1;
101
+ unsigned long long type : 7;
102
+ unsigned long long handle : 56;
103
+ const details::multi_grid::multi_grid_functions *functions;
104
+ };
105
+ #endif
106
+
107
+ struct tg_data {
108
+ unsigned int is_tiled : 1;
109
+ unsigned int type : 7;
110
+ unsigned int size : 24;
111
+ // packed to 4b
112
+ unsigned int metaGroupSize : 16;
113
+ unsigned int metaGroupRank : 16;
114
+ // packed to 8b
115
+ unsigned int mask;
116
+ // packed to 12b
117
+ unsigned int _res;
118
+ };
119
+
120
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
121
+ friend class thread_block;
122
+
123
+ union __align__(8) {
124
+ group_data group;
125
+ tg_data coalesced;
126
+ gg_data grid;
127
+ #if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
128
+ mg_data multi_grid;
129
+ #endif
130
+ } _data;
131
+
132
+ _CG_QUALIFIER thread_group operator=(const thread_group& src);
133
+
134
+ _CG_QUALIFIER thread_group(unsigned int type) {
135
+ _data.group.type = type;
136
+ _data.group._unused = false;
137
+ }
138
+
139
+ #ifdef _CG_CPP11_FEATURES
140
+ static_assert(sizeof(tg_data) <= 16, "Failed size check");
141
+ static_assert(sizeof(gg_data) <= 16, "Failed size check");
142
+ # ifdef _CG_ABI_EXPERIMENTAL
143
+ static_assert(sizeof(mg_data) <= 16, "Failed size check");
144
+ # endif
145
+ #endif
146
+
147
+ public:
148
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
149
+
150
+ _CG_QUALIFIER unsigned long long size() const;
151
+ _CG_QUALIFIER unsigned long long num_threads() const;
152
+ _CG_QUALIFIER unsigned long long thread_rank() const;
153
+ _CG_QUALIFIER void sync() const;
154
+ _CG_QUALIFIER unsigned int get_type() const {
155
+ return _data.group.type;
156
+ }
157
+
158
+ };
159
+
160
+ template <unsigned int TyId>
161
+ struct thread_group_base : public thread_group {
162
+ _CG_QUALIFIER thread_group_base() : thread_group(TyId) {}
163
+ _CG_STATIC_CONST_DECL unsigned int id = TyId;
164
+ };
165
+
166
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
167
+
168
+ /**
169
+ * class multi_grid_group;
170
+ *
171
+ * Threads within this group are guaranteed to be co-resident on the
172
+ * same system, on multiple devices within the same launched kernels.
173
+ * To use this group, the kernel must have been launched with
174
+ * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent),
175
+ * and the device must support it (queryable device attribute).
176
+ *
177
+ * Constructed via this_multi_grid();
178
+ */
179
+
180
+
181
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
182
+ class multi_grid_group;
183
+
184
+ // Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls
185
+ template <typename = void>
186
+ __device__ _CG_DEPRECATED multi_grid_group this_multi_grid();
187
+
188
+ class multi_grid_group : public thread_group_base<details::multi_grid_group_id>
189
+ {
190
+ private:
191
+ template <typename = void>
192
+ _CG_QUALIFIER multi_grid_group() {
193
+ _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics();
194
+ _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle();
195
+ }
196
+
197
+ friend multi_grid_group this_multi_grid<void>();
198
+
199
+ public:
200
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
201
+
202
+ _CG_QUALIFIER bool is_valid() const {
203
+ return (_data.multi_grid.handle != 0);
204
+ }
205
+
206
+ _CG_QUALIFIER void sync() const {
207
+ if (!is_valid()) {
208
+ _CG_ABORT();
209
+ }
210
+ _data.multi_grid.functions->sync(_data.multi_grid.handle);
211
+ }
212
+
213
+ _CG_QUALIFIER unsigned long long num_threads() const {
214
+ _CG_ASSERT(is_valid());
215
+ return _data.multi_grid.functions->size(_data.multi_grid.handle);
216
+ }
217
+
218
+ _CG_QUALIFIER unsigned long long size() const {
219
+ return num_threads();
220
+ }
221
+
222
+ _CG_QUALIFIER unsigned long long thread_rank() const {
223
+ _CG_ASSERT(is_valid());
224
+ return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle);
225
+ }
226
+
227
+ _CG_QUALIFIER unsigned int grid_rank() const {
228
+ _CG_ASSERT(is_valid());
229
+ return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle));
230
+ }
231
+
232
+ _CG_QUALIFIER unsigned int num_grids() const {
233
+ _CG_ASSERT(is_valid());
234
+ return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle));
235
+ }
236
+ };
237
+ # else
238
+ class multi_grid_group
239
+ {
240
+ private:
241
+ unsigned long long _handle;
242
+ unsigned int _size;
243
+ unsigned int _rank;
244
+
245
+ friend _CG_QUALIFIER multi_grid_group this_multi_grid();
246
+
247
+ _CG_QUALIFIER multi_grid_group() {
248
+ _handle = details::multi_grid::get_intrinsic_handle();
249
+ _size = details::multi_grid::size(_handle);
250
+ _rank = details::multi_grid::thread_rank(_handle);
251
+ }
252
+
253
+ public:
254
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
255
+
256
+ _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const {
257
+ return (_handle != 0);
258
+ }
259
+
260
+ _CG_QUALIFIER _CG_DEPRECATED void sync() const {
261
+ if (!is_valid()) {
262
+ _CG_ABORT();
263
+ }
264
+ details::multi_grid::sync(_handle);
265
+ }
266
+
267
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const {
268
+ _CG_ASSERT(is_valid());
269
+ return _size;
270
+ }
271
+
272
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const {
273
+ return num_threads();
274
+ }
275
+
276
+ _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const {
277
+ _CG_ASSERT(is_valid());
278
+ return _rank;
279
+ }
280
+
281
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const {
282
+ _CG_ASSERT(is_valid());
283
+ return (details::multi_grid::grid_rank(_handle));
284
+ }
285
+
286
+ _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const {
287
+ _CG_ASSERT(is_valid());
288
+ return (details::multi_grid::num_grids(_handle));
289
+ }
290
+ };
291
+ # endif
292
+
293
+ /**
294
+ * multi_grid_group this_multi_grid()
295
+ *
296
+ * Constructs a multi_grid_group
297
+ */
298
+ # if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
299
+ template <typename>
300
+ __device__
301
+ #else
302
+ _CG_QUALIFIER
303
+ # endif
304
+ _CG_DEPRECATED
305
+ multi_grid_group this_multi_grid()
306
+ {
307
+ return multi_grid_group();
308
+ }
309
+ #endif
310
+
311
+ /**
312
+ * class grid_group;
313
+ *
314
+ * Threads within this group are guaranteed to be co-resident on the
315
+ * same device within the same launched kernel. To use this group, the kernel
316
+ * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
317
+ * and the device must support it (queryable device attribute).
318
+ *
319
+ * Constructed via this_grid();
320
+ */
321
+ class grid_group : public thread_group_base<details::grid_group_id>
322
+ {
323
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
324
+ friend _CG_QUALIFIER grid_group this_grid();
325
+
326
+ private:
327
+ _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
328
+ _data.grid.gridWs = gridWs;
329
+ }
330
+
331
+ public:
332
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
333
+
334
+ _CG_QUALIFIER bool is_valid() const {
335
+ return (_data.grid.gridWs != NULL);
336
+ }
337
+
338
+ _CG_QUALIFIER void sync() const {
339
+ if (!is_valid()) {
340
+ _CG_ABORT();
341
+ }
342
+ details::grid::sync(&_data.grid.gridWs->barrier);
343
+ }
344
+
345
+ _CG_STATIC_QUALIFIER unsigned long long size() {
346
+ return details::grid::size();
347
+ }
348
+
349
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
350
+ return details::grid::thread_rank();
351
+ }
352
+
353
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
354
+ return details::grid::grid_dim();
355
+ }
356
+
357
+ _CG_STATIC_QUALIFIER unsigned long long num_threads() {
358
+ return details::grid::num_threads();
359
+ }
360
+
361
+ _CG_STATIC_QUALIFIER dim3 dim_blocks() {
362
+ return details::grid::dim_blocks();
363
+ }
364
+
365
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
366
+ return details::grid::num_blocks();
367
+ }
368
+
369
+ _CG_STATIC_QUALIFIER dim3 block_index() {
370
+ return details::grid::block_index();
371
+ }
372
+
373
+ _CG_STATIC_QUALIFIER unsigned long long block_rank() {
374
+ return details::grid::block_rank();
375
+ }
376
+
377
+ # if defined(_CG_HAS_CLUSTER_GROUP)
378
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
379
+ return details::grid::dim_clusters();
380
+ }
381
+
382
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
383
+ return details::grid::num_clusters();
384
+ }
385
+
386
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
387
+ return details::grid::cluster_index();
388
+ }
389
+
390
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
391
+ return details::grid::cluster_rank();
392
+ }
393
+ # endif
394
+ };
395
+
396
+ _CG_QUALIFIER grid_group this_grid() {
397
+ // Load a workspace from the driver
398
+ grid_group gg(details::get_grid_workspace());
399
+ #ifdef _CG_DEBUG
400
+ // *all* threads must be available to synchronize
401
+ gg.sync();
402
+ #endif // _CG_DEBUG
403
+ return gg;
404
+ }
405
+
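// ---------------------------------------------------------------------------
// Editorial note: illustrative usage sketch, not part of cooperative_groups.h.
// A minimal cooperative kernel that uses this_grid() for a grid-wide barrier
// between two passes over a buffer. The kernel name, the buffer `data`, and
// the two-pass pattern are assumptions for illustration; a kernel like this
// must be launched with cudaLaunchCooperativeKernel on a device that reports
// cooperative-launch support.
// ---------------------------------------------------------------------------
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void two_pass_scale(float *data, unsigned long long n)
{
    cg::grid_group grid = cg::this_grid();
    unsigned long long i = grid.thread_rank();
    if (i < n) data[i] *= 2.0f;          // pass 1: every thread scales its element
    grid.sync();                         // grid-wide barrier across all blocks
    if (i < n) data[i] += data[n - 1];   // pass 2: safely read a value written in pass 1
}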
406
+ #if defined(_CG_HAS_CLUSTER_GROUP)
407
+ /**
408
+ * class cluster_group
409
+ *
410
+ * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
411
+ * divided along all dimensions to form groups of blocks, each group of which is
412
+ * a block cluster. Clustered grids are subject to various restrictions and
413
+ * limitations. Primarily, a cluster consists of at most 8 blocks by default
414
+ * (although the user is allowed to opt-in to non-standard sizes,) and clustered
415
+ * grids are subject to additional occupancy limitations due to per-cluster
416
+ * hardware resource consumption. In exchange, a block cluster is guaranteed to
417
+ * be a cooperative group, with access to all cooperative group capabilities, as
418
+ * well as cluster specific capabilities and accelerations. A cluster_group
419
+ * represents a block cluster.
420
+ *
421
+ * Constructed via this_cluster_group();
422
+ */
423
+ class cluster_group : public thread_group_base<details::cluster_group_id>
424
+ {
425
+ // Friends
426
+ friend _CG_QUALIFIER cluster_group this_cluster();
427
+
428
+ // Disable constructor
429
+ _CG_QUALIFIER cluster_group()
430
+ {
431
+ }
432
+
433
+ public:
434
+ //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster)
435
+
436
+ using arrival_token = struct {};
437
+
438
+ // Functionality exposed by the group
439
+ _CG_STATIC_QUALIFIER void sync()
440
+ {
441
+ return details::cluster::sync();
442
+ }
443
+
444
+ _CG_STATIC_QUALIFIER arrival_token barrier_arrive()
445
+ {
446
+ details::cluster::barrier_arrive();
447
+ return arrival_token();
448
+ }
449
+
450
+ _CG_STATIC_QUALIFIER void barrier_wait()
451
+ {
452
+ return details::cluster::barrier_wait();
453
+ }
454
+
455
+ _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&)
456
+ {
457
+ return details::cluster::barrier_wait();
458
+ }
459
+
460
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
461
+ {
462
+ return details::cluster::query_shared_rank(addr);
463
+ }
464
+
465
+ template <typename T>
466
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
467
+ {
468
+ return details::cluster::map_shared_rank(addr, rank);
469
+ }
470
+
471
+ _CG_STATIC_QUALIFIER dim3 block_index()
472
+ {
473
+ return details::cluster::block_index();
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
477
+ {
478
+ return details::cluster::block_rank();
479
+ }
480
+
481
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
482
+ {
483
+ return details::cluster::thread_rank();
484
+ }
485
+
486
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
487
+ {
488
+ return details::cluster::dim_blocks();
489
+ }
490
+
491
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
492
+ {
493
+ return details::cluster::num_blocks();
494
+ }
495
+
496
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
497
+ {
498
+ return details::cluster::dim_threads();
499
+ }
500
+
501
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
502
+ {
503
+ return details::cluster::num_threads();
504
+ }
505
+
506
+ // Legacy aliases
507
+ _CG_STATIC_QUALIFIER unsigned int size()
508
+ {
509
+ return num_threads();
510
+ }
511
+ };
512
+
513
+ /*
514
+ * cluster_group this_cluster()
515
+ *
516
+ * Constructs a cluster_group
517
+ */
518
+ _CG_QUALIFIER cluster_group this_cluster()
519
+ {
520
+ cluster_group cg;
521
+ #ifdef _CG_DEBUG
522
+ cg.sync();
523
+ #endif
524
+ return cg;
525
+ }
526
+ #endif
527
+
528
+ #if defined(_CG_CPP11_FEATURES)
529
+ class thread_block;
530
+ template <unsigned int MaxBlockSize>
531
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
532
+ #endif
533
+
534
+ /**
535
+ * class thread_block
536
+ *
537
+ * Every GPU kernel is executed by a grid of thread blocks, and threads within
538
+ * each block are guaranteed to reside on the same streaming multiprocessor.
539
+ * A thread_block represents a thread block whose dimensions are not known until runtime.
540
+ *
541
+ * Constructed via this_thread_block();
542
+ */
543
+ class thread_block : public thread_group_base<details::thread_block_id>
544
+ {
545
+ // Friends
546
+ friend _CG_QUALIFIER thread_block this_thread_block();
547
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
548
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz);
549
+
550
+ #if defined(_CG_CPP11_FEATURES)
551
+ template <unsigned int MaxBlockSize>
552
+ friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
553
+ template <unsigned int Size>
554
+ friend class __static_size_multi_warp_tile_base;
555
+
556
+ details::multi_warp_scratch* const tile_memory;
557
+
558
+ template <unsigned int MaxBlockSize>
559
+ _CG_QUALIFIER thread_block(block_tile_memory<MaxBlockSize>& scratch) :
560
+ tile_memory(details::get_scratch_ptr(&scratch)) {
561
+ #ifdef _CG_DEBUG
562
+ if (num_threads() > MaxBlockSize) {
563
+ details::abort();
564
+ }
565
+ #endif
566
+ #if !defined(_CG_HAS_RESERVED_SHARED)
567
+ tile_memory->init_barriers(thread_rank());
568
+ sync();
569
+ #endif
570
+ }
571
+ #endif
572
+
573
+ // Disable constructor
574
+ _CG_QUALIFIER thread_block()
575
+ #if defined(_CG_CPP11_FEATURES)
576
+ : tile_memory(details::get_scratch_ptr(NULL))
577
+ #endif
578
+ { }
579
+
580
+ // Internal Use
581
+ _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const {
582
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
583
+
584
+ // Invalid, immediately fail
585
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
586
+ details::abort();
587
+ return (thread_block());
588
+ }
589
+
590
+ unsigned int mask;
591
+ unsigned int base_offset = thread_rank() & (~(tilesz - 1));
592
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
593
+
594
+ mask = (unsigned int)(-1) >> (32 - masklength);
595
+ mask <<= (details::laneid() & ~(tilesz - 1));
596
+ thread_group tile = thread_group(details::coalesced_group_id);
597
+ tile._data.coalesced.mask = mask;
598
+ tile._data.coalesced.size = __popc(mask);
599
+ tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz;
600
+ tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz;
601
+ tile._data.coalesced.is_tiled = true;
602
+ return (tile);
603
+ }
604
+
605
+ public:
606
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id;
607
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
608
+
609
+ _CG_STATIC_QUALIFIER void sync() {
610
+ details::cta::sync();
611
+ }
612
+
613
+ _CG_STATIC_QUALIFIER unsigned int size() {
614
+ return details::cta::size();
615
+ }
616
+
617
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
618
+ return details::cta::thread_rank();
619
+ }
620
+
621
+ // Additional functionality exposed by the group
622
+ _CG_STATIC_QUALIFIER dim3 group_index() {
623
+ return details::cta::group_index();
624
+ }
625
+
626
+ _CG_STATIC_QUALIFIER dim3 thread_index() {
627
+ return details::cta::thread_index();
628
+ }
629
+
630
+ _CG_STATIC_QUALIFIER dim3 group_dim() {
631
+ return details::cta::block_dim();
632
+ }
633
+
634
+ _CG_STATIC_QUALIFIER dim3 dim_threads() {
635
+ return details::cta::dim_threads();
636
+ }
637
+
638
+ _CG_STATIC_QUALIFIER unsigned int num_threads() {
639
+ return details::cta::num_threads();
640
+ }
641
+
642
+ };
643
+
644
+ /**
645
+ * thread_block this_thread_block()
646
+ *
647
+ * Constructs a thread_block group
648
+ */
649
+ _CG_QUALIFIER thread_block this_thread_block()
650
+ {
651
+ return (thread_block());
652
+ }
653
+
654
+ #if defined(_CG_CPP11_FEATURES)
655
+ template <unsigned int MaxBlockSize>
656
+ _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch) {
657
+ return (thread_block(scratch));
658
+ }
659
+ #endif
660
+
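// ---------------------------------------------------------------------------
// Editorial note: illustrative usage sketch, not part of cooperative_groups.h.
// Shows this_thread_block() used in place of raw __syncthreads(): a shared-
// memory reduction of one value per block. The fixed block size of 256 and
// the names `in` / `block_sums` are assumptions for illustration.
// ---------------------------------------------------------------------------
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void block_sum(const float *in, float *block_sums)
{
    __shared__ float partial[256];                    // assumes blockDim.x == 256
    cg::thread_block block = cg::this_thread_block();
    unsigned int t = block.thread_rank();
    partial[t] = in[blockIdx.x * blockDim.x + t];
    block.sync();                                     // equivalent to __syncthreads()
    for (unsigned int s = block.num_threads() / 2; s > 0; s >>= 1) {
        if (t < s) partial[t] += partial[t + s];
        block.sync();
    }
    if (t == 0) block_sums[blockIdx.x] = partial[0];
}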
661
+ /**
662
+ * class coalesced_group
663
+ *
664
+ * A group representing the current set of converged threads in a warp.
665
+ * The size of the group is not guaranteed and it may return a group of
666
+ * only one thread (itself).
667
+ *
668
+ * This group exposes warp-synchronous builtins.
669
+ * Constructed via coalesced_threads();
670
+ */
671
+ class coalesced_group : public thread_group_base<details::coalesced_group_id>
672
+ {
673
+ private:
674
+ friend _CG_QUALIFIER coalesced_group coalesced_threads();
675
+ friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
676
+ friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz);
677
+ friend class details::_coalesced_group_data_access;
678
+
679
+ _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const {
680
+ unsigned int member_pack = 0;
681
+ unsigned int member_rank = 0;
682
+ for (int bit_idx = 0; bit_idx < 32; bit_idx++) {
683
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
684
+ if (lane_bit) {
685
+ if (laneMask & lane_bit)
686
+ member_pack |= 1 << member_rank;
687
+ member_rank++;
688
+ }
689
+ }
690
+ return (member_pack);
691
+ }
692
+
693
+ // Internal Use
694
+ _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const {
695
+ const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);
696
+
697
+ // Invalid, immediately fail
698
+ if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
699
+ details::abort();
700
+ return (coalesced_group(0));
701
+ }
702
+ if (size() <= tilesz) {
703
+ return (*this);
704
+ }
705
+
706
+ if ((_data.coalesced.is_tiled == true) && pow2_tilesz) {
707
+ unsigned int base_offset = (thread_rank() & (~(tilesz - 1)));
708
+ unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
709
+ unsigned int mask = (unsigned int)(-1) >> (32 - masklength);
710
+
711
+ mask <<= (details::laneid() & ~(tilesz - 1));
712
+ coalesced_group coalesced_tile = coalesced_group(mask);
713
+ coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz;
714
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
715
+ coalesced_tile._data.coalesced.is_tiled = true;
716
+ return (coalesced_tile);
717
+ }
718
+ else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) {
719
+ unsigned int mask = 0;
720
+ unsigned int member_rank = 0;
721
+ int seen_lanes = (thread_rank() / tilesz) * tilesz;
722
+ for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) {
723
+ unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
724
+ if (lane_bit) {
725
+ if (seen_lanes <= 0 && member_rank < tilesz) {
726
+ mask |= lane_bit;
727
+ member_rank++;
728
+ }
729
+ seen_lanes--;
730
+ }
731
+ }
732
+ coalesced_group coalesced_tile = coalesced_group(mask);
733
+ // Override parent with the size of this group
734
+ coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz;
735
+ coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
736
+ return coalesced_tile;
737
+ }
738
+ else {
739
+ // None in _CG_VERSION 1000
740
+ details::abort();
741
+ }
742
+
743
+ return (coalesced_group(0));
744
+ }
745
+
746
+ protected:
747
+ _CG_QUALIFIER coalesced_group(unsigned int mask) {
748
+ _data.coalesced.mask = mask;
749
+ _data.coalesced.size = __popc(mask);
750
+ _data.coalesced.metaGroupRank = 0;
751
+ _data.coalesced.metaGroupSize = 1;
752
+ _data.coalesced.is_tiled = false;
753
+ }
754
+
755
+ _CG_QUALIFIER unsigned int get_mask() const {
756
+ return (_data.coalesced.mask);
757
+ }
758
+
759
+ public:
760
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
761
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
762
+
763
+ _CG_QUALIFIER unsigned int num_threads() const {
764
+ return _data.coalesced.size;
765
+ }
766
+
767
+ _CG_QUALIFIER unsigned int size() const {
768
+ return num_threads();
769
+ }
770
+
771
+ _CG_QUALIFIER unsigned int thread_rank() const {
772
+ return (__popc(_data.coalesced.mask & details::lanemask32_lt()));
773
+ }
774
+
775
+ // Rank of this group in the upper level of the hierarchy
776
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
777
+ return _data.coalesced.metaGroupRank;
778
+ }
779
+
780
+ // Total num partitions created out of all CTAs when the group was created
781
+ _CG_QUALIFIER unsigned int meta_group_size() const {
782
+ return _data.coalesced.metaGroupSize;
783
+ }
784
+
785
+ _CG_QUALIFIER void sync() const {
786
+ __syncwarp(_data.coalesced.mask);
787
+ }
788
+
789
+ #ifdef _CG_CPP11_FEATURES
790
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
791
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
792
+ unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 :
793
+ (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1));
794
+
795
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
796
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
797
+ }
798
+
799
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
800
+ _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
801
+ if (size() == 32) {
802
+ return details::tile::shuffle_dispatch<TyElem>::shfl_down(
803
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
804
+ }
805
+
806
+ unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
807
+
808
+ if (lane >= 32)
809
+ lane = details::laneid();
810
+
811
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
812
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
813
+ }
814
+
815
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
816
+ _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const {
817
+ if (size() == 32) {
818
+ return details::tile::shuffle_dispatch<TyElem>::shfl_up(
819
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
820
+ }
821
+
822
+ unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
823
+ if (lane >= 32)
824
+ lane = details::laneid();
825
+
826
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
827
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
828
+ }
829
+ #else
830
+ template <typename TyIntegral>
831
+ _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const {
832
+ details::assert_if_not_arithmetic<TyIntegral>();
833
+ unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 :
834
+ (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1));
835
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
836
+ }
837
+
838
+ template <typename TyIntegral>
839
+ _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const {
840
+ details::assert_if_not_arithmetic<TyIntegral>();
841
+ if (size() == 32) {
842
+ return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32));
843
+ }
844
+ unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
845
+ if (lane >= 32) lane = details::laneid();
846
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
847
+ }
848
+
849
+ template <typename TyIntegral>
850
+ _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const {
851
+ details::assert_if_not_arithmetic<TyIntegral>();
852
+ if (size() == 32) {
853
+ return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32));
854
+ }
855
+ unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
856
+ if (lane >= 32) lane = details::laneid();
857
+ return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
858
+ }
859
+ #endif
860
+
861
+ _CG_QUALIFIER int any(int predicate) const {
862
+ return (__ballot_sync(_data.coalesced.mask, predicate) != 0);
863
+ }
864
+ _CG_QUALIFIER int all(int predicate) const {
865
+ return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask);
866
+ }
867
+ _CG_QUALIFIER unsigned int ballot(int predicate) const {
868
+ if (size() == 32) {
869
+ return (__ballot_sync(0xFFFFFFFF, predicate));
870
+ }
871
+ unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate);
872
+ return (_packLanes(lane_ballot));
873
+ }
874
+
875
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
876
+
877
+ template <typename TyIntegral>
878
+ _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
879
+ details::assert_if_not_arithmetic<TyIntegral>();
880
+ if (size() == 32) {
881
+ return (__match_any_sync(0xFFFFFFFF, val));
882
+ }
883
+ unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val);
884
+ return (_packLanes(lane_match));
885
+ }
886
+
887
+ template <typename TyIntegral>
888
+ _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
889
+ details::assert_if_not_arithmetic<TyIntegral>();
890
+ if (size() == 32) {
891
+ return (__match_all_sync(0xFFFFFFFF, val, &pred));
892
+ }
893
+ unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred);
894
+ return (_packLanes(lane_match));
895
+ }
896
+
897
+ #endif /* !_CG_HAS_MATCH_COLLECTIVE */
898
+
899
+ };
900
+
901
+ _CG_QUALIFIER coalesced_group coalesced_threads()
902
+ {
903
+ return (coalesced_group(__activemask()));
904
+ }
905
+
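// ---------------------------------------------------------------------------
// Editorial note: illustrative usage sketch, not part of cooperative_groups.h.
// coalesced_threads() captures the currently converged lanes of a warp, which
// enables opportunistic (warp-aggregated) atomics inside divergent code: only
// the group leader issues the atomicAdd, and the old value is broadcast back
// with shfl(). The counter pointer name is an assumption for illustration.
// ---------------------------------------------------------------------------
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__device__ int aggregated_increment(int *counter)
{
    cg::coalesced_group active = cg::coalesced_threads();
    int prev = 0;
    if (active.thread_rank() == 0)
        prev = atomicAdd(counter, (int)active.num_threads());  // one atomic per converged group
    prev = active.shfl(prev, 0);                               // broadcast from the group leader
    return prev + (int)active.thread_rank();                   // unique slot per active lane
}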
906
+ namespace details {
907
+ template <unsigned int Size> struct verify_thread_block_tile_size;
908
+ template <> struct verify_thread_block_tile_size<32> { typedef void OK; };
909
+ template <> struct verify_thread_block_tile_size<16> { typedef void OK; };
910
+ template <> struct verify_thread_block_tile_size<8> { typedef void OK; };
911
+ template <> struct verify_thread_block_tile_size<4> { typedef void OK; };
912
+ template <> struct verify_thread_block_tile_size<2> { typedef void OK; };
913
+ template <> struct verify_thread_block_tile_size<1> { typedef void OK; };
914
+
915
+ #ifdef _CG_CPP11_FEATURES
916
+ template <unsigned int Size>
917
+ using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant<bool, (Size & (Size - 1)) == 0>;
918
+
919
+ template <unsigned int Size>
920
+ using _is_single_warp = _CG_STL_NAMESPACE::integral_constant<bool, Size <= 32>;
921
+ template <unsigned int Size>
922
+ using _is_multi_warp =
923
+ _CG_STL_NAMESPACE::integral_constant<bool, (Size > 32) && (Size <= 1024)>;
924
+
925
+ template <unsigned int Size>
926
+ using _is_valid_single_warp_tile =
927
+ _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_single_warp<Size>::value>;
928
+ template <unsigned int Size>
929
+ using _is_valid_multi_warp_tile =
930
+ _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_multi_warp<Size>::value>;
931
+ #else
932
+ template <unsigned int Size>
933
+ struct _is_multi_warp {
934
+ static const bool value = false;
935
+ };
936
+ #endif
937
+ }
938
+
939
+ template <unsigned int Size>
940
+ class __static_size_tile_base
941
+ {
942
+ protected:
943
+ _CG_STATIC_CONST_DECL unsigned int numThreads = Size;
944
+
945
+ public:
946
+ _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)
947
+
948
+ // Rank of thread within tile
949
+ _CG_STATIC_QUALIFIER unsigned int thread_rank() {
950
+ return (details::cta::thread_rank() & (numThreads - 1));
951
+ }
952
+
953
+ // Number of threads within tile
954
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() {
955
+ return numThreads;
956
+ }
957
+
958
+ _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() {
959
+ return num_threads();
960
+ }
961
+ };
962
+
963
+ template <unsigned int Size>
964
+ class __static_size_thread_block_tile_base : public __static_size_tile_base<Size>
965
+ {
966
+ friend class details::_coalesced_group_data_access;
967
+ typedef details::tile::tile_helpers<Size> th;
968
+
969
+ #ifdef _CG_CPP11_FEATURES
970
+ static_assert(details::_is_valid_single_warp_tile<Size>::value, "Size must be one of 1/2/4/8/16/32");
971
+ #else
972
+ typedef typename details::verify_thread_block_tile_size<Size>::OK valid;
973
+ #endif
974
+ using __static_size_tile_base<Size>::numThreads;
975
+ _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF;
976
+
977
+ protected:
978
+ _CG_STATIC_QUALIFIER unsigned int build_mask() {
979
+ unsigned int mask = fullMask;
980
+ if (numThreads != 32) {
981
+ // [0,31] representing the current active thread in the warp
982
+ unsigned int laneId = details::laneid();
983
+ // shift mask according to the partition it belongs to
984
+ mask = th::tileMask << (laneId & ~(th::laneMask));
985
+ }
986
+ return (mask);
987
+ }
988
+
989
+ public:
990
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
991
+
992
+ _CG_STATIC_QUALIFIER void sync() {
993
+ __syncwarp(build_mask());
994
+ }
995
+
996
+ #ifdef _CG_CPP11_FEATURES
997
+ // PTX supported collectives
998
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
999
+ _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
1000
+ return details::tile::shuffle_dispatch<TyElem>::shfl(
1001
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), srcRank, numThreads);
1002
+ }
1003
+
1004
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1005
+ _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
1006
+ return details::tile::shuffle_dispatch<TyElem>::shfl_down(
1007
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
1008
+ }
1009
+
1010
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1011
+ _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const {
1012
+ return details::tile::shuffle_dispatch<TyElem>::shfl_up(
1013
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
1014
+ }
1015
+
1016
+ template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
1017
+ _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const {
1018
+ return details::tile::shuffle_dispatch<TyElem>::shfl_xor(
1019
+ _CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), laneMask, numThreads);
1020
+ }
1021
+ #else
1022
+ template <typename TyIntegral>
1023
+ _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const {
1024
+ details::assert_if_not_arithmetic<TyIntegral>();
1025
+ return (__shfl_sync(build_mask(), var, srcRank, numThreads));
1026
+ }
1027
+
1028
+ template <typename TyIntegral>
1029
+ _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const {
1030
+ details::assert_if_not_arithmetic<TyIntegral>();
1031
+ return (__shfl_down_sync(build_mask(), var, delta, numThreads));
1032
+ }
1033
+
1034
+ template <typename TyIntegral>
1035
+ _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const {
1036
+ details::assert_if_not_arithmetic<TyIntegral>();
1037
+ return (__shfl_up_sync(build_mask(), var, delta, numThreads));
1038
+ }
1039
+
1040
+ template <typename TyIntegral>
1041
+ _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const {
1042
+ details::assert_if_not_arithmetic<TyIntegral>();
1043
+ return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads));
1044
+ }
1045
+ #endif //_CG_CPP11_FEATURES
1046
+
1047
+ _CG_QUALIFIER int any(int predicate) const {
1048
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1049
+ return (lane_ballot != 0);
1050
+ }
1051
+ _CG_QUALIFIER int all(int predicate) const {
1052
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1053
+ return (lane_ballot == build_mask());
1054
+ }
1055
+ _CG_QUALIFIER unsigned int ballot(int predicate) const {
1056
+ unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
1057
+ return (lane_ballot >> (details::laneid() & (~(th::laneMask))));
1058
+ }
1059
+
1060
+ #ifdef _CG_HAS_MATCH_COLLECTIVE
1061
+ template <typename TyIntegral>
1062
+ _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
1063
+ details::assert_if_not_arithmetic<TyIntegral>();
1064
+ unsigned int lane_match = __match_any_sync(build_mask(), val);
1065
+ return (lane_match >> (details::laneid() & (~(th::laneMask))));
1066
+ }
1067
+
1068
+ template <typename TyIntegral>
1069
+ _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
1070
+ details::assert_if_not_arithmetic<TyIntegral>();
1071
+ unsigned int lane_match = __match_all_sync(build_mask(), val, &pred);
1072
+ return (lane_match >> (details::laneid() & (~(th::laneMask))));
1073
+ }
1074
+ #endif
1075
+
1076
+ };
1077
+
1078
+ template <unsigned int Size, typename ParentT>
1079
+ class __static_parent_thread_block_tile_base
1080
+ {
1081
+ public:
1082
+ // Rank of this group in the upper level of the hierarchy
1083
+ _CG_STATIC_QUALIFIER unsigned int meta_group_rank() {
1084
+ return ParentT::thread_rank() / Size;
1085
+ }
1086
+
1087
+ // Total num partitions created out of all CTAs when the group was created
1088
+ _CG_STATIC_QUALIFIER unsigned int meta_group_size() {
1089
+ return (ParentT::size() + Size - 1) / Size;
1090
+ }
1091
+ };
1092
+
1093
+ /**
1094
+ * class thread_block_tile<unsigned int Size, ParentT = void>
1095
+ *
1096
+ * Statically-sized group type, representing one tile of a thread block.
1097
+ * The only specializations currently supported are those with native
1098
+ * hardware support (1/2/4/8/16/32)
1099
+ *
1100
+ * This group exposes warp-synchronous builtins.
1101
+ * Can only be constructed via tiled_partition<Size>(ParentT&)
1102
+ */
1103
+
1104
+ template <unsigned int Size, typename ParentT = void>
1105
+ class __single_warp_thread_block_tile :
1106
+ public __static_size_thread_block_tile_base<Size>,
1107
+ public __static_parent_thread_block_tile_base<Size, ParentT>
1108
+ {
1109
+ typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
1110
+ friend class details::_coalesced_group_data_access;
1111
+
1112
+ protected:
1113
+ _CG_QUALIFIER __single_warp_thread_block_tile() { };
1114
+ _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { };
1115
+
1116
+ _CG_STATIC_QUALIFIER unsigned int get_mask() {
1117
+ return __static_size_thread_block_tile_base<Size>::build_mask();
1118
+ }
1119
+ };
1120
+
1121
+ template <unsigned int Size>
1122
+ class __single_warp_thread_block_tile<Size, void> :
1123
+ public __static_size_thread_block_tile_base<Size>,
1124
+ public thread_group_base<details::coalesced_group_id>
1125
+ {
1126
+ _CG_STATIC_CONST_DECL unsigned int numThreads = Size;
1127
+
1128
+ template <unsigned int, typename ParentT> friend class __single_warp_thread_block_tile;
1129
+ friend class details::_coalesced_group_data_access;
1130
+
1131
+ typedef __static_size_thread_block_tile_base<numThreads> staticSizeBaseT;
1132
+
1133
+ protected:
1134
+ _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) {
1135
+ _data.coalesced.mask = staticSizeBaseT::build_mask();
1136
+ _data.coalesced.size = numThreads;
1137
+ _data.coalesced.metaGroupRank = meta_group_rank;
1138
+ _data.coalesced.metaGroupSize = meta_group_size;
1139
+ _data.coalesced.is_tiled = true;
1140
+ }
1141
+
1142
+ _CG_QUALIFIER unsigned int get_mask() const {
1143
+ return (_data.coalesced.mask);
1144
+ }
1145
+
1146
+ public:
1147
+ using staticSizeBaseT::sync;
1148
+ using staticSizeBaseT::size;
1149
+ using staticSizeBaseT::num_threads;
1150
+ using staticSizeBaseT::thread_rank;
1151
+
1152
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
1153
+ return _data.coalesced.metaGroupRank;
1154
+ }
1155
+
1156
+ _CG_QUALIFIER unsigned int meta_group_size() const {
1157
+ return _data.coalesced.metaGroupSize;
1158
+ }
1159
+ };
1160
+
1161
+ /**
1162
+ * Outer level API calls
1163
+ * void sync(GroupT) - see <group_type>.sync()
1164
+ * void thread_rank(GroupT) - see <group_type>.thread_rank()
1165
+ * void group_size(GroupT) - see <group_type>.size()
1166
+ */
1167
+ template <class GroupT>
1168
+ _CG_QUALIFIER void sync(GroupT const &g)
1169
+ {
1170
+ g.sync();
1171
+ }
1172
+
1173
+ // TODO: Use a static dispatch to determine appropriate return type
1174
+ // C++03 is stuck with unsigned long long for now
1175
+ #ifdef _CG_CPP11_FEATURES
1176
+ template <class GroupT>
1177
+ _CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) {
1178
+ return g.thread_rank();
1179
+ }
1180
+
1181
+
1182
+ template <class GroupT>
1183
+ _CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) {
1184
+ return g.num_threads();
1185
+ }
1186
+ #else
1187
+ template <class GroupT>
1188
+ _CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) {
1189
+ return static_cast<unsigned long long>(g.thread_rank());
1190
+ }
1191
+
1192
+
1193
+ template <class GroupT>
1194
+ _CG_QUALIFIER unsigned long long group_size(GroupT const &g) {
1195
+ return static_cast<unsigned long long>(g.num_threads());
1196
+ }
1197
+ #endif
1198
+
1199
+
1200
+ /**
1201
+ * tiled_partition
1202
+ *
1203
+ * The tiled_partition(parent, tilesz) method is a collective operation that
1204
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1205
+ *
1206
+ * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will
1207
+ * be created where threads having identical k = (thread_rank(parent)/tilesz)
1208
+ * will be members of the same subgroup.
1209
+ *
1210
+ * The implementation may cause the calling thread to wait until all the members
1211
+ * of the parent group have invoked the operation before resuming execution.
1212
+ *
1213
+ * Functionality is limited to power-of-two sized subgroup instances of at most
1214
+ * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be
1215
+ * tiled_partition() in _CG_VERSION 1000.
1216
+ */
1217
+ _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz)
1218
+ {
1219
+ if (parent.get_type() == details::coalesced_group_id) {
1220
+ const coalesced_group *_cg = static_cast<const coalesced_group*>(&parent);
1221
+ return _cg->_get_tiled_threads(tilesz);
1222
+ }
1223
+ else {
1224
+ const thread_block *_tb = static_cast<const thread_block*>(&parent);
1225
+ return _tb->_get_tiled_threads(tilesz);
1226
+ }
1227
+ }
1228
+
1229
+ // Thread block type overload: returns a basic thread_group for now (may be specialized later)
1230
+ _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz)
1231
+ {
1232
+ return (parent._get_tiled_threads(tilesz));
1233
+ }
1234
+
1235
+ // Coalesced group type overload: retains its ability to stay coalesced
1236
+ _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz)
1237
+ {
1238
+ return (parent._get_tiled_threads(tilesz));
1239
+ }
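// ---------------------------------------------------------------------------
// Editorial note: illustrative usage sketch, not part of cooperative_groups.h.
// Uses the dynamic tiled_partition() overload on a coalesced_group, which keeps
// warp-synchronous builtins such as shfl_down() available, to reduce a value
// across each 16-thread tile. The tile size of 16 and the function name are
// assumptions for illustration.
// ---------------------------------------------------------------------------
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__device__ float tile16_sum(float val)
{
    cg::coalesced_group warp = cg::coalesced_threads();
    cg::coalesced_group tile = cg::tiled_partition(warp, 16);      // power-of-two tile size <= 32
    for (unsigned int offset = tile.num_threads() / 2; offset > 0; offset >>= 1)
        val += tile.shfl_down(val, offset);                        // pairwise reduction within the tile
    return val;                                                    // tile.thread_rank() == 0 holds the sum
}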
1240
+
1241
+ namespace details {
1242
+ template <unsigned int Size, typename ParentT>
1243
+ class internal_thread_block_tile : public __single_warp_thread_block_tile<Size, ParentT> {};
1244
+
1245
+ template <unsigned int Size, typename ParentT>
1246
+ _CG_QUALIFIER internal_thread_block_tile<Size, ParentT> tiled_partition_internal() {
1247
+ return internal_thread_block_tile<Size, ParentT>();
1248
+ }
1249
+
1250
+ template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
1251
+ _CG_QUALIFIER TyVal multi_warp_collectives_helper(
1252
+ const GroupT& group,
1253
+ WarpLambda warp_lambda,
1254
+ InterWarpLambda inter_warp_lambda) {
1255
+ return group.template collectives_scheme<TyVal>(warp_lambda, inter_warp_lambda);
1256
+ }
1257
+
1258
+ template <typename T, typename GroupT>
1259
+ _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) {
1260
+ return group.template get_scratch_location<T>(warp_id);
1261
+ }
1262
+
1263
+ template <typename GroupT>
1264
+ _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) {
1265
+ return group.get_sync_location();
1266
+ }
1267
+
1268
+ }
1269
+ /**
1270
+ * tiled_partition<tilesz>
1271
+ *
1272
+ * The tiled_partition<tilesz>(parent) method is a collective operation that
1273
+ * partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
1274
+ *
1275
+ * A total of (size(parent)/tilesz) subgroups will be created,
1276
+ * therefore the parent group size must be evenly divisible by the tilesz.
1277
+ * The allow parent groups are thread_block or thread_block_tile<size>.
1278
+ *
1279
+ * The implementation may cause the calling thread to wait until all the members
1280
+ * of the parent group have invoked the operation before resuming execution.
1281
+ *
1282
+ * Functionality is limited to native hardware sizes, 1/2/4/8/16/32.
1283
+ * The size(parent) must be greater than the template Size parameter;
1284
+ * otherwise the results are undefined.
1285
+ */
1286
+
1287
+ #if defined(_CG_CPP11_FEATURES)
1288
+ template <unsigned int Size>
1289
+ class __static_size_multi_warp_tile_base : public __static_size_tile_base<Size>
1290
+ {
1291
+ static_assert(details::_is_valid_multi_warp_tile<Size>::value, "Size must be one of 64/128/256/512");
1292
+
1293
+ template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
1294
+ friend __device__ TyVal details::multi_warp_collectives_helper(
1295
+ const GroupT& group,
1296
+ WarpLambda warp_lambda,
1297
+ InterWarpLambda inter_warp_lambda);
1298
+ template <typename T, typename GroupT>
1299
+ friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id);
1300
+ template <typename GroupT>
1301
+ friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group);
1302
+ template <unsigned int OtherSize>
1303
+ friend class __static_size_multi_warp_tile_base;
1304
+ using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
1305
+ using ThisType = __static_size_multi_warp_tile_base<Size>;
1306
+ _CG_STATIC_CONST_DECL int numWarps = Size / 32;
1307
+
1308
+ protected:
1309
+ details::multi_warp_scratch* const tile_memory;
1310
+
1311
+ template <typename GroupT>
1312
+ _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) {
1313
+ #if defined(_CG_HAS_RESERVED_SHARED)
1314
+ details::sync_warps_reset(get_sync_location(), details::cta::thread_rank());
1315
+ g.sync();
1316
+ #endif
1317
+ }
1318
+
1319
+
1320
+ private:
1321
+ _CG_QUALIFIER details::barrier_t* get_sync_location() const {
1322
+ // Different group sizes use different barriers, all groups of a given size share one barrier.
1323
+ unsigned int sync_id = details::log2(Size / 64);
1324
+ return &tile_memory->barriers[sync_id];
1325
+ }
1326
+
1327
+ template <typename T>
1328
+ _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const {
1329
+ unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id;
1330
+ return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
1331
+ }
1332
+
1333
+ template <typename T>
1334
+ _CG_QUALIFIER T* get_scratch_location() const {
1335
+ unsigned int scratch_id = details::cta::thread_rank() / 32;
1336
+ return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
1337
+ }
1338
+
1339
+ template <typename TyVal>
1340
+ _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const {
1341
+ unsigned int src_warp = src / 32;
1342
+ auto warp = details::tiled_partition_internal<32, ThisType>();
1343
+ details::barrier_t* sync_location = get_sync_location();
1344
+
1345
+ // Get the warp slot of the source thread's warp.
1346
+ TyVal* warp_scratch_location = get_scratch_location<TyVal>(src_warp);
1347
+
1348
+ if (warp.meta_group_rank() == src_warp) {
1349
+ warp.sync();
1350
+ // Put shuffled value into my warp slot and let my warp arrive at the barrier.
1351
+ if (thread_rank() == src) {
1352
+ *warp_scratch_location = val;
1353
+ }
1354
+ details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps);
1355
+ TyVal result = *warp_scratch_location;
1356
+ details::sync_warps_wait(sync_location, details::cta::thread_rank());
1357
+ return result;
1358
+ }
1359
+ else {
1360
+ // Wait for the source warp to arrive on the barrier.
1361
+ details::sync_warps_wait_for_specific_warp(sync_location,
1362
+ (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp));
1363
+ TyVal result = *warp_scratch_location;
1364
+ details::sync_warps(sync_location, details::cta::thread_rank(), numWarps);
1365
+ return result;
1366
+ }
1367
+ }
1368
+
1369
+ template <typename TyVal, typename WarpLambda, typename InterWarpLambda>
1370
+ _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const {
1371
+ static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
1372
+ "Collectives with tiles larger than 32 threads are limited to types smaller than 8 bytes");
1373
+ auto warp = details::tiled_partition_internal<32, ThisType>();
1374
+ details::barrier_t* sync_location = get_sync_location();
1375
+ TyVal* warp_scratch_location = get_scratch_location<TyVal>();
1376
+
1377
+ warp_lambda(warp, warp_scratch_location);
1378
+
1379
+ if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) {
1380
+ auto subwarp = details::tiled_partition_internal<numWarps, decltype(warp)>();
1381
+ if (subwarp.meta_group_rank() == 0) {
1382
+ TyVal* thread_scratch_location = get_scratch_location<TyVal>(subwarp.thread_rank());
1383
+ inter_warp_lambda(subwarp, thread_scratch_location);
1384
+ }
1385
+ warp.sync();
1386
+ details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps);
1387
+ }
1388
+ TyVal result = *warp_scratch_location;
1389
+ return result;
1390
+ }
1391
+
1392
+ public:
1393
+ _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id;
1394
+
1395
+ using __static_size_tile_base<Size>::thread_rank;
1396
+
1397
+ template <typename TyVal>
1398
+ _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const {
1399
+ static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
1400
+ "Collectives with tiles larger than 32 threads are limited to types smaller than 8 bytes");
1401
+ return shfl_impl(val, src);
1402
+ }
1403
+
1404
+ _CG_QUALIFIER void sync() const {
1405
+ details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps);
1406
+ }
1407
+
1408
+ _CG_QUALIFIER int any(int predicate) const {
1409
+ auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
1410
+ *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate);
1411
+ };
1412
+ auto inter_warp_lambda =
1413
+ [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
1414
+ *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
1415
+ };
1416
+ return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
1417
+ }
1418
+
1419
+ _CG_QUALIFIER int all(int predicate) const {
1420
+ auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
1421
+ *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate);
1422
+ };
1423
+ auto inter_warp_lambda =
1424
+ [] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
1425
+ *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
1426
+ };
1427
+ return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
1428
+ }
1429
+ };
1430
+
1431
+
1432
+ template <unsigned int Size, typename ParentT = void>
1433
+ class __multi_warp_thread_block_tile :
1434
+ public __static_size_multi_warp_tile_base<Size>,
1435
+ public __static_parent_thread_block_tile_base<Size, ParentT>
1436
+ {
1437
+ typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
1438
+ typedef __static_size_multi_warp_tile_base<Size> staticTileBaseT;
1439
+ protected:
1440
+ _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) :
1441
+ __static_size_multi_warp_tile_base<Size>(g) {}
1442
+ };
1443
+
1444
+ template <unsigned int Size>
1445
+ class __multi_warp_thread_block_tile<Size, void> : public __static_size_multi_warp_tile_base<Size>
1446
+ {
1447
+ const unsigned int metaGroupRank;
1448
+ const unsigned int metaGroupSize;
1449
+
1450
+ protected:
1451
+ template <unsigned int OtherSize, typename ParentT>
1452
+ _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile<OtherSize, ParentT>& g) :
1453
+ __static_size_multi_warp_tile_base<Size>(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {}
1454
+
1455
+ public:
1456
+ _CG_QUALIFIER unsigned int meta_group_rank() const {
1457
+ return metaGroupRank;
1458
+ }
1459
+
1460
+ _CG_QUALIFIER unsigned int meta_group_size() const {
1461
+ return metaGroupSize;
1462
+ }
1463
+ };
1464
+ #endif
1465
+
1466
+ template <unsigned int Size, typename ParentT = void>
1467
+ class thread_block_tile;
1468
+
1469
+ namespace details {
1470
+ template <unsigned int Size, typename ParentT, bool IsMultiWarp>
1471
+ class thread_block_tile_impl;
1472
+
1473
+ template <unsigned int Size, typename ParentT>
1474
+ class thread_block_tile_impl<Size, ParentT, false>: public __single_warp_thread_block_tile<Size, ParentT>
1475
+ {
1476
+ protected:
1477
+ template <unsigned int OtherSize, typename OtherParentT, bool OtherIsMultiWarp>
1478
+ _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl<OtherSize, OtherParentT, OtherIsMultiWarp>& g) :
1479
+ __single_warp_thread_block_tile<Size, ParentT>(g.meta_group_rank(), g.meta_group_size()) {}
1480
+
1481
+ _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) :
1482
+ __single_warp_thread_block_tile<Size, ParentT>() {}
1483
+ };
1484
+
1485
+ #if defined(_CG_CPP11_FEATURES)
1486
+ template <unsigned int Size, typename ParentT>
1487
+ class thread_block_tile_impl<Size, ParentT, true> : public __multi_warp_thread_block_tile<Size, ParentT>
1488
+ {
1489
+ protected:
1490
+ template <typename GroupT>
1491
+ _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) :
1492
+ __multi_warp_thread_block_tile<Size, ParentT>(g) {}
1493
+ };
1494
+ #else
1495
+ template <unsigned int Size, typename ParentT>
1496
+ class thread_block_tile_impl<Size, ParentT, true>
1497
+ {
1498
+ protected:
1499
+ template <typename GroupT>
1500
+ _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {}
1501
+ };
1502
+ #endif
1503
+ }
1504
+
1505
+ template <unsigned int Size, typename ParentT>
1506
+ class thread_block_tile : public details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>
1507
+ {
1508
+ friend _CG_QUALIFIER thread_block_tile<1, void> this_thread();
1509
+
1510
+ protected:
1511
+ _CG_QUALIFIER thread_block_tile(const ParentT& g) :
1512
+ details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>(g) {}
1513
+
1514
+ public:
1515
+ _CG_QUALIFIER operator thread_block_tile<Size, void>() const {
1516
+ return thread_block_tile<Size, void>(*this);
1517
+ }
1518
+ };
1519
+
1520
+ template <unsigned int Size>
1521
+ class thread_block_tile<Size, void> : public details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>
1522
+ {
1523
+ template <unsigned int, typename ParentT>
1524
+ friend class thread_block_tile;
1525
+
1526
+ protected:
1527
+ template <unsigned int OtherSize, typename OtherParentT>
1528
+ _CG_QUALIFIER thread_block_tile(const thread_block_tile<OtherSize, OtherParentT>& g) :
1529
+ details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
1530
+
1531
+ public:
1532
+ template <typename ParentT>
1533
+ _CG_QUALIFIER thread_block_tile(const thread_block_tile<Size, ParentT>& g) :
1534
+ details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
1535
+ };
1536
+
1537
+ namespace details {
1538
+ template <unsigned int Size, typename ParentT>
1539
+ struct tiled_partition_impl;
1540
+
1541
+ template <unsigned int Size>
1542
+ struct tiled_partition_impl<Size, thread_block> : public thread_block_tile<Size, thread_block> {
1543
+ _CG_QUALIFIER tiled_partition_impl(const thread_block& g) :
1544
+ thread_block_tile<Size, thread_block>(g) {}
1545
+ };
1546
+
1547
+ // ParentT = static thread_block_tile<ParentSize, GrandParent> specialization
1548
+ template <unsigned int Size, unsigned int ParentSize, typename GrandParent>
1549
+ struct tiled_partition_impl<Size, thread_block_tile<ParentSize, GrandParent> > :
1550
+ public thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> > {
1551
+ #ifdef _CG_CPP11_FEATURES
1552
+ static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size");
1553
+ #endif
1554
+ _CG_QUALIFIER tiled_partition_impl(const thread_block_tile<ParentSize, GrandParent>& g) :
1555
+ thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> >(g) {}
1556
+ };
1557
+
1558
+ }
1559
+
1560
+ template <unsigned int Size, typename ParentT>
1561
+ _CG_QUALIFIER thread_block_tile<Size, ParentT> tiled_partition(const ParentT& g)
1562
+ {
1563
+ return details::tiled_partition_impl<Size, ParentT>(g);
1564
+ }
1565
+
1566
+ /**
1567
+ * thread_group this_thread()
1568
+ *
1569
+ * Constructs a generic thread_group containing only the calling thread
1570
+ */
1571
+ _CG_QUALIFIER thread_block_tile<1, void> this_thread()
1572
+ {
1573
+ // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its
1574
+ // meta group rank and size set to 0 and 1 respectively.
1575
+ return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block());
1576
+ }
1577
+
1578
+ /**
1579
+ * <group_type>.sync()
1580
+ *
1581
+ * Executes a barrier across the group
1582
+ *
1583
+ * Implements both a compiler fence and an architectural fence to prevent
1584
+ * memory reordering around the barrier.
1585
+ */
1586
+ _CG_QUALIFIER void thread_group::sync() const
1587
+ {
1588
+ switch (_data.group.type) {
1589
+ case details::coalesced_group_id:
1590
+ cooperative_groups::sync(*static_cast<const coalesced_group*>(this));
1591
+ break;
1592
+ case details::thread_block_id:
1593
+ cooperative_groups::sync(*static_cast<const thread_block*>(this));
1594
+ break;
1595
+ case details::grid_group_id:
1596
+ cooperative_groups::sync(*static_cast<const grid_group*>(this));
1597
+ break;
1598
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1599
+ case details::multi_grid_group_id:
1600
+ cooperative_groups::sync(*static_cast<const multi_grid_group*>(this));
1601
+ break;
1602
+ #endif
1603
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1604
+ case details::cluster_group_id:
1605
+ cooperative_groups::sync(*static_cast<const cluster_group*>(this));
1606
+ break;
1607
+ #endif
1608
+ default:
1609
+ break;
1610
+ }
1611
+ }
1612
+
1613
+ /**
1614
+ * <group_type>.size()
1615
+ *
1616
+ * Returns the total number of threads in the group.
1617
+ */
1618
+ _CG_QUALIFIER unsigned long long thread_group::size() const
1619
+ {
1620
+ unsigned long long size = 0;
1621
+ switch (_data.group.type) {
1622
+ case details::coalesced_group_id:
1623
+ size = cooperative_groups::group_size(*static_cast<const coalesced_group*>(this));
1624
+ break;
1625
+ case details::thread_block_id:
1626
+ size = cooperative_groups::group_size(*static_cast<const thread_block*>(this));
1627
+ break;
1628
+ case details::grid_group_id:
1629
+ size = cooperative_groups::group_size(*static_cast<const grid_group*>(this));
1630
+ break;
1631
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1632
+ case details::multi_grid_group_id:
1633
+ size = cooperative_groups::group_size(*static_cast<const multi_grid_group*>(this));
1634
+ break;
1635
+ #endif
1636
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1637
+ case details::cluster_group_id:
1638
+ size = cooperative_groups::group_size(*static_cast<const cluster_group*>(this));
1639
+ break;
1640
+ #endif
1641
+ default:
1642
+ break;
1643
+ }
1644
+ return size;
1645
+ }
1646
+
1647
+ /**
1648
+ * <group_type>.thread_rank()
1649
+ *
1650
+ * Returns the linearized rank of the calling thread along the interval [0, size()).
1651
+ */
1652
+ _CG_QUALIFIER unsigned long long thread_group::thread_rank() const
1653
+ {
1654
+ unsigned long long rank = 0;
1655
+ switch (_data.group.type) {
1656
+ case details::coalesced_group_id:
1657
+ rank = cooperative_groups::thread_rank(*static_cast<const coalesced_group*>(this));
1658
+ break;
1659
+ case details::thread_block_id:
1660
+ rank = cooperative_groups::thread_rank(*static_cast<const thread_block*>(this));
1661
+ break;
1662
+ case details::grid_group_id:
1663
+ rank = cooperative_groups::thread_rank(*static_cast<const grid_group*>(this));
1664
+ break;
1665
+ #if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
1666
+ case details::multi_grid_group_id:
1667
+ rank = cooperative_groups::thread_rank(*static_cast<const multi_grid_group*>(this));
1668
+ break;
1669
+ #endif
1670
+ #if defined(_CG_HAS_CLUSTER_GROUP)
1671
+ case details::cluster_group_id:
1672
+ rank = cooperative_groups::thread_rank(*static_cast<const cluster_group*>(this));
1673
+ break;
1674
+ #endif
1675
+ default:
1676
+ break;
1677
+ }
1678
+ return rank;
1679
+ }
1680
+
1681
+ _CG_END_NAMESPACE
1682
+
1683
+ #include <cooperative_groups/details/partitioning.h>
1684
+ #if (!defined(_MSC_VER) || defined(_WIN64))
1685
+ # include <cooperative_groups/details/invoke.h>
1686
+ #endif
1687
+
1688
+ # endif /* ! (__cplusplus, __CUDACC__) */
1689
+
1690
+ #endif /* !_COOPERATIVE_GROUPS_H_ */
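
For orientation, here is a minimal usage sketch of the cooperative-groups API defined in the header above. It is illustrative only and not part of the packaged file; the kernel name and the reduction pattern are assumptions chosen to exercise this_thread_block(), the static tiled_partition<32>(), thread_rank(), and the tile-level shfl_down().

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

// Each 32-thread tile reduces its own elements; lane 0 accumulates into *out.
__global__ void tile_sum_example(const int *data, int *out, int n) {
    cg::thread_block block = cg::this_thread_block();
    // Static partition: yields a thread_block_tile<32> with warp-level collectives.
    cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);

    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int val = (idx < n) ? data[idx] : 0;

    // Shuffle-down reduction within the tile.
    for (int offset = tile.size() / 2; offset > 0; offset /= 2) {
        val += tile.shfl_down(val, offset);
    }

    if (tile.thread_rank() == 0) {
        atomicAdd(out, val);
    }
}

By contrast, the dynamic overloads tiled_partition(parent, tilesz) shown earlier return a generic thread_group: they accept a runtime tile size, but the result does not expose the tile-specific shuffle members.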
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h ADDED
@@ -0,0 +1,348 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(CU_COMPLEX_H_)
51
+ #define CU_COMPLEX_H_
52
+
53
+ #if !defined(__CUDACC_RTC__)
54
+ #if defined(__GNUC__)
55
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)))
56
+ #pragma GCC diagnostic ignored "-Wunused-function"
57
+ #endif
58
+ #endif
59
+ #endif
60
+
61
+ /* When including a C header file in C++ code, extern "C" is required.
62
+ * But the standard QNX headers already have ifdef extern guards in them when compiling C++ code,
63
+ * and extern "C" cannot be nested.
64
+ * Hence keep this header out of the extern "C" block.
65
+ */
66
+
67
+ #if !defined(__CUDACC__)
68
+ #include <math.h> /* import fabsf, sqrt */
69
+ #endif /* !defined(__CUDACC__) */
70
+
71
+ #if defined(__cplusplus)
72
+ extern "C" {
73
+ #endif /* __cplusplus */
74
+
75
+ #include "vector_types.h"
76
+
77
+ typedef float2 cuFloatComplex;
78
+
79
+ __host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x)
80
+ {
81
+ return x.x;
82
+ }
83
+
84
+ __host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x)
85
+ {
86
+ return x.y;
87
+ }
88
+
89
+ __host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex
90
+ (float r, float i)
91
+ {
92
+ cuFloatComplex res;
93
+ res.x = r;
94
+ res.y = i;
95
+ return res;
96
+ }
97
+
98
+ __host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x)
99
+ {
100
+ return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x));
101
+ }
102
+ __host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x,
103
+ cuFloatComplex y)
104
+ {
105
+ return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y),
106
+ cuCimagf(x) + cuCimagf(y));
107
+ }
108
+
109
+ __host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x,
110
+ cuFloatComplex y)
111
+ {
112
+ return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y),
113
+ cuCimagf(x) - cuCimagf(y));
114
+ }
115
+
116
+ /* This implementation could suffer from intermediate overflow even though
117
+ * the final result would be in range. However, various implementations do
118
+ * not guard against this (presumably to avoid losing performance), so we
119
+ * don't do it either to stay competitive.
120
+ */
121
+ __host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x,
122
+ cuFloatComplex y)
123
+ {
124
+ cuFloatComplex prod;
125
+ prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) -
126
+ (cuCimagf(x) * cuCimagf(y)),
127
+ (cuCrealf(x) * cuCimagf(y)) +
128
+ (cuCimagf(x) * cuCrealf(y)));
129
+ return prod;
130
+ }
131
+
132
+ /* This implementation guards against intermediate underflow and overflow
133
+ * by scaling. Such guarded implementations are usually the default for
134
+ * complex library implementations, with some also offering an unguarded,
135
+ * faster version.
136
+ */
137
+ __host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x,
138
+ cuFloatComplex y)
139
+ {
140
+ cuFloatComplex quot;
141
+ float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y));
142
+ float oos = 1.0f / s;
143
+ float ars = cuCrealf(x) * oos;
144
+ float ais = cuCimagf(x) * oos;
145
+ float brs = cuCrealf(y) * oos;
146
+ float bis = cuCimagf(y) * oos;
147
+ s = (brs * brs) + (bis * bis);
148
+ oos = 1.0f / s;
149
+ quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos,
150
+ ((ais * brs) - (ars * bis)) * oos);
151
+ return quot;
152
+ }
153
+
154
+ /*
155
+ * We would like to call hypotf(), but it's not available on all platforms.
156
+ * This discrete implementation guards against intermediate underflow and
157
+ * overflow by scaling. Otherwise we would lose half the exponent range.
158
+ * There are various ways of doing guarded computation. For now we choose the
160
+ * simplest and fastest solution; however, this may suffer from inaccuracies
160
+ * if sqrt and division are not IEEE compliant.
161
+ */
162
+ __host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x)
163
+ {
164
+ float a = cuCrealf(x);
165
+ float b = cuCimagf(x);
166
+ float v, w, t;
167
+ a = fabsf(a);
168
+ b = fabsf(b);
169
+ if (a > b) {
170
+ v = a;
171
+ w = b;
172
+ } else {
173
+ v = b;
174
+ w = a;
175
+ }
176
+ t = w / v;
177
+ t = 1.0f + t * t;
178
+ t = v * sqrtf(t);
179
+ if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
180
+ t = v + w;
181
+ }
182
+ return t;
183
+ }
184
+
185
+ /* Double precision */
186
+ typedef double2 cuDoubleComplex;
187
+
188
+ __host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x)
189
+ {
190
+ return x.x;
191
+ }
192
+
193
+ __host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x)
194
+ {
195
+ return x.y;
196
+ }
197
+
198
+ __host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex
199
+ (double r, double i)
200
+ {
201
+ cuDoubleComplex res;
202
+ res.x = r;
203
+ res.y = i;
204
+ return res;
205
+ }
206
+
207
+ __host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x)
208
+ {
209
+ return make_cuDoubleComplex (cuCreal(x), -cuCimag(x));
210
+ }
211
+
212
+ __host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x,
213
+ cuDoubleComplex y)
214
+ {
215
+ return make_cuDoubleComplex (cuCreal(x) + cuCreal(y),
216
+ cuCimag(x) + cuCimag(y));
217
+ }
218
+
219
+ __host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x,
220
+ cuDoubleComplex y)
221
+ {
222
+ return make_cuDoubleComplex (cuCreal(x) - cuCreal(y),
223
+ cuCimag(x) - cuCimag(y));
224
+ }
225
+
226
+ /* This implementation could suffer from intermediate overflow even though
227
+ * the final result would be in range. However, various implementations do
228
+ * not guard against this (presumably to avoid losing performance), so we
229
+ * don't do it either to stay competitive.
230
+ */
231
+ __host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x,
232
+ cuDoubleComplex y)
233
+ {
234
+ cuDoubleComplex prod;
235
+ prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) -
236
+ (cuCimag(x) * cuCimag(y)),
237
+ (cuCreal(x) * cuCimag(y)) +
238
+ (cuCimag(x) * cuCreal(y)));
239
+ return prod;
240
+ }
241
+
242
+ /* This implementation guards against intermediate underflow and overflow
243
+ * by scaling. Such guarded implementations are usually the default for
244
+ * complex library implementations, with some also offering an unguarded,
245
+ * faster version.
246
+ */
247
+ __host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x,
248
+ cuDoubleComplex y)
249
+ {
250
+ cuDoubleComplex quot;
251
+ double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y)));
252
+ double oos = 1.0 / s;
253
+ double ars = cuCreal(x) * oos;
254
+ double ais = cuCimag(x) * oos;
255
+ double brs = cuCreal(y) * oos;
256
+ double bis = cuCimag(y) * oos;
257
+ s = (brs * brs) + (bis * bis);
258
+ oos = 1.0 / s;
259
+ quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos,
260
+ ((ais * brs) - (ars * bis)) * oos);
261
+ return quot;
262
+ }
263
+
264
+ /* This implementation guards against intermediate underflow and overflow
265
+ * by scaling. Otherwise we would lose half the exponent range. There are
266
+ * various ways of doing guarded computation. For now we choose the simplest
267
+ * and fastest solution; however, this may suffer from inaccuracies if sqrt
268
+ * and division are not IEEE compliant.
269
+ */
270
+ __host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x)
271
+ {
272
+ double a = cuCreal(x);
273
+ double b = cuCimag(x);
274
+ double v, w, t;
275
+ a = fabs(a);
276
+ b = fabs(b);
277
+ if (a > b) {
278
+ v = a;
279
+ w = b;
280
+ } else {
281
+ v = b;
282
+ w = a;
283
+ }
284
+ t = w / v;
285
+ t = 1.0 + t * t;
286
+ t = v * sqrt(t);
287
+ if ((v == 0.0) ||
288
+ (v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) {
289
+ t = v + w;
290
+ }
291
+ return t;
292
+ }
293
+
294
+ #if defined(__cplusplus)
295
+ }
296
+ #endif /* __cplusplus */
297
+
298
+ /* aliases */
299
+ typedef cuFloatComplex cuComplex;
300
+ __host__ __device__ static __inline__ cuComplex make_cuComplex (float x,
301
+ float y)
302
+ {
303
+ return make_cuFloatComplex (x, y);
304
+ }
305
+
306
+ /* float-to-double promotion */
307
+ __host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble
308
+ (cuFloatComplex c)
309
+ {
310
+ return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c));
311
+ }
312
+
313
+ __host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat
314
+ (cuDoubleComplex c)
315
+ {
316
+ return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c));
317
+ }
318
+
319
+
320
+ __host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d)
321
+ {
322
+ float real_res;
323
+ float imag_res;
324
+
325
+ real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d);
326
+ imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d);
327
+
328
+ real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res;
329
+ imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res;
330
+
331
+ return make_cuComplex(real_res, imag_res);
332
+ }
333
+
334
+ __host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d)
335
+ {
336
+ double real_res;
337
+ double imag_res;
338
+
339
+ real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d);
340
+ imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d);
341
+
342
+ real_res = -(cuCimag(x) * cuCimag(y)) + real_res;
343
+ imag_res = (cuCimag(x) * cuCreal(y)) + imag_res;
344
+
345
+ return make_cuDoubleComplex(real_res, imag_res);
346
+ }
347
+
348
+ #endif /* !defined(CU_COMPLEX_H_) */
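
As a quick sanity check on the complex-arithmetic helpers above, the host-side snippet below multiplies, divides, and takes the magnitude of two single-precision values. It is a small illustration, not part of the header, and assumes it is built with nvcc or a host compiler that has the CUDA include path set.

#include <cstdio>
#include <cuComplex.h>

int main(void) {
    cuFloatComplex a = make_cuFloatComplex(1.0f, 2.0f);   /* 1 + 2i */
    cuFloatComplex b = make_cuFloatComplex(3.0f, -4.0f);  /* 3 - 4i */

    cuFloatComplex p = cuCmulf(a, b);  /* (1+2i)(3-4i) = 11 + 2i */
    cuFloatComplex q = cuCdivf(a, b);  /* scaled division, guards over/underflow */
    float m = cuCabsf(a);              /* |1+2i| = sqrt(5) */

    printf("p = %f%+fi\n", cuCrealf(p), cuCimagf(p));
    printf("q = %f%+fi\n", cuCrealf(q), cuCimagf(q));
    printf("|a| = %f\n", m);
    return 0;
}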
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h ADDED
@@ -0,0 +1,109 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_AWBARRIER_PRIMITIVES_H_
51
+ #define _CUDA_AWBARRIER_PRIMITIVES_H_
52
+
53
+ #include "cuda_awbarrier_helpers.h"
54
+
55
+ #if !defined(_CUDA_AWBARRIER_SM_TARGET)
56
+ # error This file requires compute capability 7.0 or greater.
57
+ #endif
58
+
59
+ _CUDA_AWBARRIER_STATIC_QUALIFIER __host__
60
+ uint32_t __mbarrier_maximum_count() {
61
+ return _CUDA_AWBARRIER_MAX_COUNT;
62
+ }
63
+
64
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
65
+ void __mbarrier_init(__mbarrier_t* barrier, uint32_t expected_count) {
66
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(barrier, expected_count);
67
+ }
68
+
69
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
70
+ void __mbarrier_inval(__mbarrier_t* barrier) {
71
+ _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(barrier);
72
+ }
73
+
74
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
75
+ __mbarrier_token_t __mbarrier_arrive(__mbarrier_t* barrier) {
76
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(barrier);
77
+ }
78
+
79
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
80
+ __mbarrier_token_t __mbarrier_arrive_and_drop(__mbarrier_t* barrier) {
81
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(barrier);
82
+ }
83
+
84
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
85
+ bool __mbarrier_test_wait(__mbarrier_t* barrier, __mbarrier_token_t token) {
86
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(barrier, token);
87
+ }
88
+
89
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
90
+ uint32_t __mbarrier_token_pending_count(__mbarrier_token_t token) {
91
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(token);
92
+ }
93
+
94
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
95
+ bool __mbarrier_test_wait_parity(__mbarrier_t* barrier, bool phase_parity) {
96
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(barrier, phase_parity);
97
+ }
98
+
99
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
100
+ bool __mbarrier_try_wait(__mbarrier_t* barrier, __mbarrier_token_t token, uint32_t max_sleep_nanosec) {
101
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(barrier, token, max_sleep_nanosec);
102
+ }
103
+
104
+ _CUDA_AWBARRIER_STATIC_QUALIFIER
105
+ bool __mbarrier_try_wait_parity(__mbarrier_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
106
+ return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(barrier, phase_parity, max_sleep_nanosec);
107
+ }
108
+
109
+ #endif /* !_CUDA_AWBARRIER_PRIMITIVES_H_ */
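
To make the __mbarrier primitive signatures above concrete, here is a minimal shared-memory sketch. It is illustrative only (the kernel name and single-phase structure are assumptions) and needs a GPU and toolchain that satisfy the header's compute-capability check.

#include <cuda_awbarrier_primitives.h>

__global__ void mbarrier_example(int *out) {
    __shared__ __mbarrier_t bar;

    if (threadIdx.x == 0) {
        // One expected arrival per thread in the block.
        __mbarrier_init(&bar, blockDim.x);
    }
    __syncthreads();

    // ... independent per-thread work would go here ...

    // Arrive, then poll until every thread has arrived for this phase.
    __mbarrier_token_t token = __mbarrier_arrive(&bar);
    while (!__mbarrier_test_wait(&bar, token)) {
        // spin
    }

    if (threadIdx.x == 0) {
        *out = 1;
        __mbarrier_inval(&bar);
    }
}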
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp ADDED
@@ -0,0 +1,1546 @@
1
+ /*
2
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_FP8_HPP__)
51
+ #define __CUDA_FP8_HPP__
52
+
53
+ #if !defined(__CUDA_FP8_H__)
54
+ #error "Do not include this file directly. Instead, include cuda_fp8.h."
55
+ #endif
56
+
57
+ /* C++ header for std::memcpy (used for type punning in host-side
58
+ * implementations). When compiling as a CUDA source file, memcpy is provided
59
+ * implicitly. !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
60
+ */
61
+ #if defined(__cplusplus) && !defined(__CUDACC__)
62
+ #include <cstring>
63
+ #elif !defined(__cplusplus) && !defined(__CUDACC__)
64
+ #include <string.h>
65
+ #endif /* defined(__cplusplus) && !defined(__CUDACC__) */
66
+
67
+ /* Set up structure-alignment attribute */
68
+ #if !(defined __CUDA_ALIGN__)
69
+ #if defined(__CUDACC__)
70
+ #define __CUDA_ALIGN__(align) __align__(align)
71
+ #else
72
+ /* Define alignment macro based on compiler type (cannot assume C11 "_Alignas"
73
+ * is available) */
74
+ #if __cplusplus >= 201103L
75
+ #define __CUDA_ALIGN__(n) \
76
+ alignas(n) /* C++11 kindly gives us a keyword for this */
77
+ #else /* !defined(__CPP_VERSION_AT_LEAST_11_FP8)*/
78
+ #if defined(__GNUC__)
79
+ #define __CUDA_ALIGN__(n) __attribute__((aligned(n)))
80
+ #elif defined(_MSC_VER)
81
+ #define __CUDA_ALIGN__(n) __declspec(align(n))
82
+ #else
83
+ #define __CUDA_ALIGN__(n)
84
+ #endif /* defined(__GNUC__) */
85
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
86
+ #endif /* defined(__CUDACC__) */
87
+ #endif /* !(defined __CUDA_ALIGN__) */
88
+
89
+ #if !(defined __CPP_VERSION_AT_LEAST_11_FP8)
90
+ /* need c++11 for explicit operators */
91
+ #define __CUDA_NO_FP8_CONVERSION_OPERATORS__
92
+ #endif
93
+
94
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
95
+ __nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
96
+ const __nv_fp8_interpretation_t fp8_interpretation) {
97
+ unsigned char res;
98
+ unsigned long long int xbits;
99
+
100
+ #if defined(__CUDACC__) || (!defined __cplusplus)
101
+ (void)memcpy(&xbits, &x, sizeof(x));
102
+ #else
103
+ (void)std::memcpy(&xbits, &x, sizeof(x));
104
+ #endif
105
+ unsigned char FP8_MAXNORM;
106
+ unsigned char FP8_MANTISSA_MASK;
107
+ unsigned short int FP8_EXP_BIAS;
108
+ unsigned long long int FP8_SIGNIFICAND_BITS;
109
+ const unsigned long long int DP_INF_BITS = 0x7FF0000000000000ULL;
110
+ unsigned long long int FP8_MINDENORM_O2;
111
+ unsigned long long int FP8_OVERFLOW_THRESHOLD;
112
+ unsigned long long int FP8_MINNORM;
113
+
114
+ if (fp8_interpretation == __NV_E4M3) {
115
+ FP8_EXP_BIAS = 7U;
116
+ FP8_SIGNIFICAND_BITS = 4ULL;
117
+ FP8_MANTISSA_MASK = 0x7U;
118
+ FP8_MINDENORM_O2 = 0x3F50000000000000ULL; // mindenorm/2 = 2^-10
119
+ FP8_OVERFLOW_THRESHOLD =
120
+ 0x407D000000000000ULL; // maxnorm + 1/2ulp = 0x1.Cp+8 + 0x1p+4
121
+ FP8_MAXNORM = 0x7EU;
122
+ FP8_MINNORM = 0x3F90000000000000ULL; // minnorm = 2^-6
123
+ } else { //__NV_E5M2
124
+ FP8_EXP_BIAS = 15U;
125
+ FP8_SIGNIFICAND_BITS = 3ULL;
126
+ FP8_MANTISSA_MASK = 0x3U;
127
+ FP8_MINDENORM_O2 = 0x3EE0000000000000ULL; // mindenorm/2 = 2^-17
128
+ FP8_OVERFLOW_THRESHOLD =
129
+ 0x40EE000000000000ULL -
130
+ 1ULL; // maxnorm + 1/2ulp = 0x1.Ep+15, and -1 to have common code
131
+ FP8_MAXNORM = 0x7BU;
132
+ FP8_MINNORM = 0x3F10000000000000ULL; // minnorm = 2^-14
133
+ }
134
+
135
+ // 1/2 LSB of the target format, positioned in double precision mantissa
136
+ // helpful in midpoints detection during round-to-nearest-even step
137
+ const unsigned long long int FP8_DP_HALF_ULP =
138
+ (unsigned long long int)1ULL << (53ULL - FP8_SIGNIFICAND_BITS - 1ULL);
139
+ // prepare sign bit in target format
140
+ unsigned char sign = (unsigned char)((xbits >> 63ULL) << 7U);
141
+ // prepare exponent field in target format
142
+ unsigned char exp =
143
+ (unsigned char)((((unsigned short int)(xbits >> 52ULL)) & 0x7FFU) -
144
+ 1023U + FP8_EXP_BIAS);
145
+ // round mantissa to target format width, rounding towards zero
146
+ unsigned char mantissa =
147
+ (unsigned char)(xbits >> (53ULL - FP8_SIGNIFICAND_BITS)) &
148
+ FP8_MANTISSA_MASK;
149
+ unsigned long long int absx = xbits & 0x7FFFFFFFFFFFFFFFULL;
150
+
151
+ if (absx <= FP8_MINDENORM_O2) {
152
+ // zero or underflow
153
+ res = 0U;
154
+ } else if (absx > DP_INF_BITS) {
155
+ // NaN
156
+ if (fp8_interpretation == __NV_E4M3) {
157
+ res = 0x7FU;
158
+ } else {
159
+ // NaN --> QNaN
160
+ res = 0x7EU | mantissa;
161
+ }
162
+ } else if (absx > FP8_OVERFLOW_THRESHOLD) {
163
+ if (saturate == __NV_SATFINITE) {
164
+ res = FP8_MAXNORM;
165
+ } else {
166
+ // __NV_NOSAT
167
+ if (fp8_interpretation == __NV_E4M3) {
168
+ // no Inf in E4M3
169
+ res = 0x7FU; // NaN
170
+ } else {
171
+ res = 0x7CU; // Inf in E5M2
172
+ }
173
+ }
174
+ } else if (absx >= FP8_MINNORM) {
175
+ res = (unsigned char)((exp << (FP8_SIGNIFICAND_BITS - 1U)) | mantissa);
176
+ // rounded-off bits
177
+ unsigned long long int round =
178
+ xbits & ((FP8_DP_HALF_ULP << 1ULL) - 1ULL);
179
+ // round-to-nearest-even adjustment
180
+ if ((round > FP8_DP_HALF_ULP) ||
181
+ ((round == FP8_DP_HALF_ULP) && (mantissa & 1U))) {
182
+ res = (unsigned char)(res + 1U);
183
+ }
184
+ } else // Denormal range
185
+ {
186
+ unsigned char shift = (unsigned char)(1U - exp);
187
+ // add implicit leading bit
188
+ mantissa |= (unsigned char)(1U << (FP8_SIGNIFICAND_BITS - 1U));
189
+ // additional round-off due to denormalization
190
+ res = (unsigned char)(mantissa >> shift);
191
+
192
+ // rounded-off bits, including implicit leading bit
193
+ unsigned long long int round =
194
+ (xbits | ((unsigned long long int)1ULL << (53ULL - 1ULL))) &
195
+ ((FP8_DP_HALF_ULP << (shift + 1ULL)) - 1ULL);
196
+ // round-to-nearest-even adjustment
197
+ if ((round > (FP8_DP_HALF_ULP << shift)) ||
198
+ ((round == (FP8_DP_HALF_ULP << shift)) && (res & 1U))) {
199
+ res = (unsigned char)(res + 1U);
200
+ }
201
+ }
202
+
203
+ res |= sign;
204
+
205
+ return (__nv_fp8_storage_t)res;
206
+ }
207
+
208
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
209
+ __nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate,
210
+ const __nv_fp8_interpretation_t fp8_interpretation) {
211
+ __nv_fp8x2_storage_t storage = (__nv_fp8x2_storage_t)__nv_cvt_double_to_fp8(
212
+ x.y, saturate, fp8_interpretation);
213
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
214
+ storage = (__nv_fp8x2_storage_t)(storage |
215
+ __nv_cvt_double_to_fp8(
216
+ x.x, saturate, fp8_interpretation));
217
+ return storage;
218
+ }
219
+
220
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
221
+ __nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate,
222
+ const __nv_fp8_interpretation_t fp8_interpretation) {
223
+ __nv_fp8_storage_t res = 0U;
224
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
225
+ if (saturate == __NV_SATFINITE) {
226
+ __nv_fp8x2_storage_t storage;
227
+ if (fp8_interpretation == __NV_E5M2) {
228
+ asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
229
+ : "=h"(storage)
230
+ : "f"(x), "f"(0.0f));
231
+ } else {
232
+ asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
233
+ : "=h"(storage)
234
+ : "f"(x), "f"(0.0f));
235
+ }
236
+ res = (__nv_fp8_storage_t)storage;
237
+ } else
238
+ #endif
239
+ {
240
+ unsigned int xbits;
241
+ #if defined(__CUDACC__) || (!defined __cplusplus)
242
+ (void)memcpy(&xbits, &x, sizeof(x));
243
+ #else
244
+ (void)std::memcpy(&xbits, &x, sizeof(x));
245
+ #endif
246
+
247
+ // isnan
248
+ if ((xbits & 0x7FFFFFFFU) > 0x7F800000U) {
249
+ // Canonical NaN
250
+ xbits = 0x7FFFFFFFU;
251
+ }
252
+
253
+ float fx;
254
+ #if defined(__CUDACC__) || (!defined __cplusplus)
255
+ (void)memcpy(&fx, &xbits, sizeof(xbits));
256
+ #else
257
+ (void)std::memcpy(&fx, &xbits, sizeof(xbits));
258
+ #endif
259
+
260
+ const double dx = (double)fx;
261
+ res = __nv_cvt_double_to_fp8(dx, saturate, fp8_interpretation);
262
+ }
263
+ return res;
264
+ }
265
+
266
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
267
+ __nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate,
268
+ const __nv_fp8_interpretation_t fp8_interpretation) {
269
+ __nv_fp8x2_storage_t storage;
270
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
271
+ if (saturate == __NV_SATFINITE) {
272
+ if (fp8_interpretation == __NV_E5M2) {
273
+ asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
274
+ : "=h"(storage)
275
+ : "f"(x.x), "f"(x.y));
276
+ } else {
277
+ asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
278
+ : "=h"(storage)
279
+ : "f"(x.x), "f"(x.y));
280
+ }
281
+ } else
282
+ #endif
283
+ {
284
+ storage = (__nv_fp8x2_storage_t)__nv_cvt_float_to_fp8(
285
+ x.y, saturate, fp8_interpretation);
286
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
287
+ storage = (__nv_fp8x2_storage_t)(storage | __nv_cvt_float_to_fp8(
288
+ x.x, saturate,
289
+ fp8_interpretation));
290
+ }
291
+ return storage;
292
+ }
293
+
294
+ __CUDA_HOSTDEVICE_FP8_DECL__ float
295
+ __internal_halfraw_to_float(const __half_raw x) {
296
+ float f;
297
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
298
+ asm("{cvt.f32.f16 %0, %1;}\n" : "=f"(f) : "h"(x.x));
299
+ #else
300
+ const unsigned int ux = (unsigned int)x.x;
301
+ unsigned int sign = (ux >> 15U) & 1U;
302
+ unsigned int exponent = (ux >> 10U) & 0x1fU;
303
+ unsigned int mantissa = (ux & 0x3ffU) << 13U;
304
+ if (exponent == 0x1fU) { /* NaN or Inf */
305
+ /* discard sign of a NaN */
306
+ sign = ((mantissa != 0U) ? (sign >> 1U) : sign);
307
+ mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U);
308
+ exponent = 0xffU;
309
+ } else if (exponent == 0U) { /* Denorm or Zero */
310
+ if (mantissa != 0U) {
311
+ unsigned int msb;
312
+ exponent = 0x71U;
313
+ do {
314
+ msb = (mantissa & 0x400000U);
315
+ mantissa <<= 1U; /* normalize */
316
+ --exponent;
317
+ } while (msb == 0U);
318
+ mantissa &= 0x7fffffU; /* 1.mantissa is implicit */
319
+ }
320
+ } else {
321
+ exponent += 0x70U;
322
+ }
323
+ const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa);
324
+ #if defined(__CUDACC__) || (!defined __cplusplus)
325
+ (void)memcpy(&f, &u, sizeof(u));
326
+ #else
327
+ (void)std::memcpy(&f, &u, sizeof(u));
328
+ #endif
329
+ #endif /* (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) */
330
+ return f;
331
+ }
332
+
333
+ __CUDA_HOSTDEVICE_FP8_DECL__ float2
334
+ __internal_halfraw2_to_float2(const __half2_raw x) {
335
+ __half_raw raw;
336
+ float2 res;
337
+ raw.x = x.x;
338
+ res.x = __internal_halfraw_to_float(raw);
339
+ raw.x = x.y;
340
+ res.y = __internal_halfraw_to_float(raw);
341
+ return res;
342
+ }
343
+
344
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
345
+ __nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate,
346
+ const __nv_fp8_interpretation_t fp8_interpretation) {
347
+ __nv_fp8_storage_t res = 0U;
348
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
349
+ if (saturate == __NV_SATFINITE) {
350
+ unsigned int half2_storage = (unsigned int)(x.x);
351
+ __nv_fp8x2_storage_t tmp;
352
+ if (fp8_interpretation == __NV_E5M2) {
353
+ asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
354
+ : "=h"(tmp)
355
+ : "r"(half2_storage));
356
+ } else {
357
+ asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
358
+ : "=h"(tmp)
359
+ : "r"(half2_storage));
360
+ }
361
+ res = (__nv_fp8_storage_t)tmp;
362
+ } else
363
+ #endif
364
+ {
365
+ float fx = __internal_halfraw_to_float(x);
366
+ res = __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
367
+ }
368
+ return res;
369
+ }
370
+
371
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2(
372
+ const __half2_raw x, const __nv_saturation_t saturate,
373
+ const __nv_fp8_interpretation_t fp8_interpretation) {
374
+ __nv_fp8x2_storage_t tmp;
375
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
376
+ if (saturate == __NV_SATFINITE) {
377
+ unsigned int half2_storage;
378
+ (void)memcpy(&half2_storage, &x, sizeof(x));
379
+
380
+ if (fp8_interpretation == __NV_E5M2) {
381
+ asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
382
+ : "=h"(tmp)
383
+ : "r"(half2_storage));
384
+ } else {
385
+ asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
386
+ : "=h"(tmp)
387
+ : "r"(half2_storage));
388
+ }
389
+ } else
390
+ #endif
391
+ {
392
+ __half_raw raw;
393
+ raw.x = x.x;
394
+ __nv_fp8_storage_t lo =
395
+ __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation);
396
+ raw.x = x.y;
397
+ __nv_fp8_storage_t hi =
398
+ __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation);
399
+ tmp = hi;
400
+ tmp = (__nv_fp8x2_storage_t)(tmp << 8U);
401
+ tmp = (__nv_fp8x2_storage_t)(tmp | lo);
402
+ }
403
+ return tmp;
404
+ }
405
+
406
+ __CUDA_HOSTDEVICE_FP8_DECL__ float
407
+ __internal_bf16raw_to_float(const __nv_bfloat16_raw x) {
408
+ const unsigned int ux = ((unsigned int)x.x) << 16U;
409
+ float fx;
410
+ #if defined(__CUDACC__) || (!defined __cplusplus)
411
+ (void)memcpy(&fx, &ux, sizeof(ux));
412
+ #else
413
+ (void)std::memcpy(&fx, &ux, sizeof(ux));
414
+ #endif
415
+ return fx;
416
+ }
417
+
418
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_bfloat16_raw
419
+ __internal_float_to_bf16raw_rz(const float x) {
420
+ unsigned int ux;
421
+ __nv_bfloat16_raw r;
422
+ #if defined(__CUDACC__) || (!defined __cplusplus)
423
+ (void)memcpy(&ux, &x, sizeof(x));
424
+ #else
425
+ (void)std::memcpy(&ux, &x, sizeof(x));
426
+ #endif
427
+ r.x = (unsigned short int)(ux >> 16U);
428
+ return r;
429
+ }
430
+
431
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8(
432
+ const __nv_bfloat16_raw x, const __nv_saturation_t saturate,
433
+ const __nv_fp8_interpretation_t fp8_interpretation) {
434
+ const float fx = __internal_bf16raw_to_float(x);
435
+ const __nv_fp8_storage_t res =
436
+ __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
437
+ return res;
438
+ }
439
+
440
+ __CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
441
+ __nv_cvt_bfloat16raw2_to_fp8x2(
442
+ const __nv_bfloat162_raw x, const __nv_saturation_t saturate,
443
+ const __nv_fp8_interpretation_t fp8_interpretation) {
444
+ __nv_bfloat16_raw raw;
445
+ raw.x = x.y;
446
+ __nv_fp8x2_storage_t storage =
447
+ (__nv_fp8x2_storage_t)__nv_cvt_bfloat16raw_to_fp8(raw, saturate,
448
+ fp8_interpretation);
449
+ storage = (__nv_fp8x2_storage_t)(storage << 8U);
450
+ raw.x = x.x;
451
+ storage = (__nv_fp8x2_storage_t)(storage |
452
+ __nv_cvt_bfloat16raw_to_fp8(
453
+ raw, saturate, fp8_interpretation));
454
+ return storage;
455
+ }
456
+
457
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
458
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
459
+ const __nv_fp8_interpretation_t fp8_interpretation);
460
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half_raw
461
+ __nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x,
462
+ const __nv_fp8_interpretation_t fp8_interpretation) {
463
+ __half_raw res;
464
+ res.x = 0U;
465
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
466
+ res.x =
467
+ __nv_cvt_fp8x2_to_halfraw2((__nv_fp8x2_storage_t)x, fp8_interpretation)
468
+ .x;
469
+ #else
470
+ unsigned short int ur = (unsigned short int)x;
471
+ ur = (unsigned short int)(ur << 8U);
472
+
473
+ if (fp8_interpretation == __NV_E5M2) {
474
+ if ((ur & 0x7FFFU) > 0x7C00U) {
475
+ /* If NaN, return canonical NaN */
476
+ ur = 0x7FFFU;
477
+ }
478
+ } else { // __NV_E4M3
479
+ unsigned short int sign = ur & 0x8000U;
480
+ unsigned short int exponent =
481
+ (unsigned short int)(((ur & 0x7800U) >> 1U) + 0x2000U);
482
+ unsigned short int mantissa = (ur & 0x0700U) >> 1U;
483
+ unsigned char absx = 0x7FU & (unsigned char)x;
484
+
485
+ if (absx == 0x7FU) // NaN
486
+ {
487
+ ur = 0x7FFFU; // fp16 canonical NaN, discard sign
488
+ } else if (exponent == 0x2000U) {
489
+ // zero or denormal
490
+ if (mantissa != 0U) {
491
+ // normalize
492
+ mantissa = (unsigned short int)(mantissa << 1U);
493
+ while ((mantissa & 0x0400U) == 0U) {
494
+ mantissa = (unsigned short int)(mantissa << 1U);
495
+ exponent = (unsigned short int)(exponent - 0x0400U);
496
+ }
497
+ // discard implicit leading bit
498
+ mantissa &= 0x03FFU;
499
+ } else { // Zero
500
+ exponent = 0U;
501
+ }
502
+
503
+ ur = (sign | exponent) | mantissa;
504
+ } else {
505
+ ur = (sign | exponent) | mantissa;
506
+ }
507
+ }
508
+ res.x = ur;
509
+ #endif
510
+ return res;
511
+ }
512
+
513
+ __CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
514
+ __nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
515
+ const __nv_fp8_interpretation_t fp8_interpretation) {
516
+ __half2_raw res;
517
+ #if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
518
+ unsigned int half2_storage;
519
+ if (fp8_interpretation == __NV_E5M2) {
520
+ asm("{cvt.rn.f16x2.e5m2x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x));
521
+ } else {
522
+ asm("{cvt.rn.f16x2.e4m3x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x));
523
+ }
524
+ (void)memcpy(&res, &half2_storage, sizeof(half2_storage));
525
+ #else
526
+ res.x =
527
+ __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)x, fp8_interpretation).x;
528
+ res.y = __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)(x >> 8U),
529
+ fp8_interpretation)
530
+ .x;
531
+ #endif
532
+ return res;
533
+ }
534
+
535
+ /* All other definitions in this file are only visible to C++ compilers */
536
+ #if defined(__cplusplus)
537
+
538
+ /**
539
+ * \defgroup CUDA_MATH_FP8_E5M2_STRUCT C++ struct for handling fp8 data type of e5m2 kind.
540
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
541
+ */
542
+
543
+ /**
544
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
545
+ * \brief __nv_fp8_e5m2 datatype
546
+ *
547
+ * \details This structure implements the datatype for handling
548
+ * \p fp8 floating-point numbers of \p e5m2 kind:
549
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
550
+ *
551
+ * The structure implements converting constructors and operators.
552
+ */
553
+ struct __CUDA_ALIGN__(1) __nv_fp8_e5m2 {
554
+ public:
555
+ /**
556
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
557
+ * Storage variable contains the \p fp8 floating-point data.
558
+ */
559
+ __nv_fp8_storage_t __x;
560
+
561
+ /**
562
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
563
+ * Constructor by default.
564
+ */
565
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
566
+ __nv_fp8_e5m2() = default;
567
+ #else
568
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2() {}
569
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
570
+
571
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
572
+
573
+ /* Construct from wider FP types */
574
+ /* Note we do avoid constructor init-list because of special host/device
575
+ * compilation rules */
576
+
577
+ /**
578
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
579
+ * Constructor from \p __half data type, relies on \p __NV_SATFINITE
580
+ * behavior for out-of-range values.
581
+ */
582
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __half f) {
583
+ __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
584
+ __NV_SATFINITE, __NV_E5M2);
585
+ }
586
+ /**
587
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
588
+ * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
589
+ * behavior for out-of-range values.
590
+ */
591
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __nv_bfloat16 f) {
592
+ __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
593
+ __NV_SATFINITE, __NV_E5M2);
594
+ }
595
+ /**
596
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
597
+ * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
598
+ * for out-of-range values.
599
+ */
600
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const float f) {
601
+ __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
602
+ }
603
+ /**
604
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
605
+ * Constructor from \p double data type, relies on \p __NV_SATFINITE
606
+ * behavior for out-of-range values.
607
+ */
608
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const double f) {
609
+ __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
610
+ }
611
+
612
+ /* Converts from integral */
613
+
614
+ /**
615
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
616
+ * Constructor from \p unsigned \p short \p int data type, relies on \p
617
+ * __NV_SATFINITE behavior for out-of-range values.
618
+ */
619
+ explicit __CUDA_HOSTDEVICE_FP8__
620
+ __nv_fp8_e5m2(const unsigned short int val) {
621
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
622
+ }
623
+ /**
624
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
625
+ * Constructor from \p unsigned \p int data type, relies on \p
626
+ * __NV_SATFINITE behavior for out-of-range values.
627
+ */
628
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const unsigned int val) {
629
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
630
+ }
631
+ /**
632
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
633
+ * Constructor from \p unsigned \p long \p long \p int data type, relies on
634
+ * \p __NV_SATFINITE behavior for out-of-range values.
635
+ */
636
+ explicit __CUDA_HOSTDEVICE_FP8__
637
+ __nv_fp8_e5m2(const unsigned long long int val) {
638
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
639
+ }
640
+
641
+ /**
642
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
643
+ * Constructor from \p short \p int data type.
644
+ */
645
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const short int val) {
646
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
647
+ }
648
+ /**
649
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
650
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
651
+ * for out-of-range values.
652
+ */
653
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const int val) {
654
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
655
+ }
656
+ /**
657
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
658
+ * Constructor from \p long \p long \p int data type, relies on \p
659
+ * __NV_SATFINITE behavior for out-of-range values.
660
+ */
661
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const long long int val) {
662
+ __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
663
+ }
664
+
665
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
666
+ /* Widening FP converts */
667
+ /**
668
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
669
+ * Conversion operator to \p __half data type.
670
+ */
671
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
672
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
673
+ }
674
+ /**
675
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
676
+ * Conversion operator to \p float data type.
677
+ */
678
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
679
+ return __internal_halfraw_to_float(
680
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
681
+ }
682
+ /**
683
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
684
+ * Conversion operator to \p __nv_bfloat16 data type.
685
+ */
686
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
687
+ return static_cast<__nv_bfloat16>(
688
+ __internal_float_to_bf16raw_rz(float(*this)));
689
+ }
690
+ /**
691
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
692
+ * Conversion operator to \p double data type.
693
+ */
694
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
695
+ return static_cast<double>(float(*this));
696
+ }
697
+
698
+ /* Convert to integral */
699
+
700
+ /**
701
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
702
+ * Conversion operator to \p unsigned \p char data type.
703
+ * Clamps negative and too large inputs to the output range.
704
+ * \p NaN inputs convert to \p zero.
705
+ */
706
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
707
+ unsigned char i;
708
+ const float f = float(*this);
709
+ const unsigned char max_val = 0xFFU;
710
+ const unsigned char min_val = 0U;
711
+ const unsigned char bits = (*this).__x;
712
+ // saturation fixup
713
+ if ((bits & 0x7FU) > 0x7CU) {
714
+ // NaN
715
+ i = 0;
716
+ } else if (f > static_cast<float>(max_val)) {
717
+ // saturate maximum
718
+ i = max_val;
719
+ } else if (f < static_cast<float>(min_val)) {
720
+ // saturate minimum
721
+ i = min_val;
722
+ } else {
723
+ // normal value
724
+ i = static_cast<unsigned char>(f);
725
+ }
726
+ return i;
727
+ }
728
+ /**
729
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
730
+ * Conversion operator to \p unsigned \p short \p int data type.
731
+ * Clamps negative and too large inputs to the output range.
732
+ * \p NaN inputs convert to \p zero.
733
+ */
734
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
735
+ return __half2ushort_rz(__half(*this));
736
+ }
737
+ /**
738
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
739
+ * Conversion operator to \p unsigned \p int data type.
740
+ * Clamps negative and too large inputs to the output range.
741
+ * \p NaN inputs convert to \p zero.
742
+ */
743
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
744
+ return __half2uint_rz(__half(*this));
745
+ }
746
+ /**
747
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
748
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
749
+ * Clamps negative and too large inputs to the output range.
750
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
751
+ */
752
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
753
+ return __half2ull_rz(__half(*this));
754
+ }
755
+
756
+ /**
757
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
758
+ * Conversion operator to \p signed \p char data type.
759
+ * Clamps too large inputs to the output range.
760
+ * \p NaN inputs convert to \p zero.
761
+ */
762
+ explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
763
+ signed char i;
764
+ const float f = float(*this);
765
+ const signed char max_val = (signed char)0x7FU;
766
+ const signed char min_val = (signed char)0x80U;
767
+ const unsigned char bits = (*this).__x;
768
+ // saturation fixup
769
+ if ((bits & 0x7FU) > 0x7CU) {
770
+ // NaN
771
+ i = 0;
772
+ } else if (f > static_cast<float>(max_val)) {
773
+ // saturate maximum
774
+ i = max_val;
775
+ } else if (f < static_cast<float>(min_val)) {
776
+ // saturate minimum
777
+ i = min_val;
778
+ } else {
779
+ // normal value
780
+ i = static_cast<signed char>(f);
781
+ }
782
+ return i;
783
+ }
784
+ /**
785
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
786
+ * Conversion operator to \p short \p int data type.
787
+ * Clamps too large inputs to the output range.
788
+ * \p NaN inputs convert to \p zero.
789
+ */
790
+ explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
791
+ return __half2short_rz(__half(*this));
792
+ }
793
+ /**
794
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
795
+ * Conversion operator to \p int data type.
796
+ * Clamps too large inputs to the output range.
797
+ * \p NaN inputs convert to \p zero.
798
+ */
799
+ explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
800
+ return __half2int_rz(__half(*this));
801
+ }
802
+ /**
803
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
804
+ * Conversion operator to \p long \p long \p int data type.
805
+ * Clamps too large inputs to the output range.
806
+ * \p NaN inputs convert to \p 0x8000000000000000LL.
807
+ */
808
+ explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
809
+ return __half2ll_rz(__half(*this));
810
+ }
811
+
812
+ /**
813
+ * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
814
+ * Conversion operator to \p bool data type.
815
+ * +0 and -0 inputs convert to \p false.
816
+ * Non-zero inputs convert to \p true.
817
+ */
818
+ explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
819
+ return (__x & 0x7FU) != 0U;
820
+ }
821
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
822
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
823
+ };
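The e5m2 struct above only declares conversions; a minimal host-side sketch of a float round trip follows (illustrative only, not part of the vendor header; the program and values are hypothetical, and it assumes compilation with nvcc so that <cuda_fp8.h> and its dependencies are on the include path):

#include <cuda_fp8.h>
#include <cstdio>

int main() {
    const float x = 3.14159f;
    __nv_fp8_e5m2 q(x);               // narrowing ctor: __NV_SATFINITE, round-to-nearest
    const float back = float(q);      // widening conversion goes through __half
    std::printf("%f -> 0x%02x -> %f\n", x, (unsigned)q.__x, back);
    return 0;
}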
824
+
825
+ /**
826
+ * \defgroup CUDA_MATH_FP8X2_E5M2_STRUCT C++ struct for handling vector type of two fp8 values of e5m2 kind.
827
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
828
+ */
829
+
830
+ /**
831
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
832
+ * \brief __nv_fp8x2_e5m2 datatype
833
+ *
834
+ * \details This structure implements the datatype for handling two
835
+ * \p fp8 floating-point numbers of \p e5m2 kind each:
836
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
837
+ *
838
+ * The structure implements converting constructors and operators.
839
+ */
840
+ struct __CUDA_ALIGN__(2) __nv_fp8x2_e5m2 {
841
+ public:
842
+ /**
843
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
844
+ * Storage variable contains the vector of two \p fp8 floating-point data
845
+ * values.
846
+ */
847
+ __nv_fp8x2_storage_t __x;
848
+
849
+ /**
850
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
851
+ * Constructor by default.
852
+ */
853
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
854
+ __nv_fp8x2_e5m2() = default;
855
+ #else
856
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2() {}
857
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
858
+
859
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
860
+
861
+ /* Construct from wider types */
862
+
863
+ /**
864
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
865
+ * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
866
+ * behavior for out-of-range values.
867
+ */
868
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __half2 f) {
869
+ __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
870
+ __NV_SATFINITE, __NV_E5M2);
871
+ }
872
+ /**
873
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
874
+ * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
875
+ * behavior for out-of-range values.
876
+ */
877
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __nv_bfloat162 f) {
878
+ __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
879
+ __NV_SATFINITE, __NV_E5M2);
880
+ }
881
+ /**
882
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
883
+ * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
884
+ * behavior for out-of-range values.
885
+ */
886
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const float2 f) {
887
+ __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
888
+ }
889
+ /**
890
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
891
+ * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
892
+ * behavior for out-of-range values.
893
+ */
894
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const double2 f) {
895
+ __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
896
+ }
897
+
898
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
899
+ /* Widening converts */
900
+ /**
901
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
902
+ * Conversion operator to \p __half2 data type.
903
+ */
904
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
905
+ return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
906
+ }
907
+ /**
908
+ * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
909
+ * Conversion operator to \p float2 data type.
910
+ */
911
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
912
+ return __internal_halfraw2_to_float2(
913
+ __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
914
+ }
915
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
916
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
917
+ };
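A device-side sketch of the paired e5m2 type defined above, assuming a kernel compiled with nvcc (kernel name and pointers are hypothetical):

#include <cuda_fp8.h>
#include <cuda_fp16.h>

__global__ void half2_roundtrip_e5m2(const __half2 *in, __half2 *out) {
    __nv_fp8x2_e5m2 packed(in[0]);            // saturating conversion of both halves
    out[0] = static_cast<__half2>(packed);    // widen back to __half2
}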
918
+
919
+ __CUDA_HOSTDEVICE_FP8_DECL__ unsigned int
920
+ __internal_pack_u16x2_to_u32(const unsigned short int src_lo,
921
+ const unsigned short int src_hi) {
922
+ unsigned int dst;
923
+ #if (defined __CUDACC__) && (defined __CUDA_ARCH__)
924
+ asm("{ mov.b32 %0, {%1,%2};}\n" : "=r"(dst) : "h"(src_lo), "h"(src_hi));
925
+ #else
926
+ dst = (static_cast<unsigned int>(src_hi) << 16U) |
927
+ static_cast<unsigned int>(src_lo);
928
+ #endif
929
+ return dst;
930
+ }
931
+
932
+ /**
933
+ * \defgroup CUDA_MATH_FP8X4_E5M2_STRUCT C++ struct for handling vector type of four fp8 values of e5m2 kind.
934
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
935
+ */
936
+
937
+ /**
938
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
939
+ * \brief __nv_fp8x4_e5m2 datatype
940
+ *
941
+ * \details This structure implements the datatype for handling four
942
+ * \p fp8 floating-point numbers of \p e5m2 kind each:
943
+ * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
944
+ *
945
+ * The structure implements converting constructors and operators.
946
+ */
947
+ struct __CUDA_ALIGN__(4) __nv_fp8x4_e5m2 {
948
+ public:
949
+ /**
950
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
951
+ * Storage variable contains the vector of four \p fp8 floating-point data
952
+ * values.
953
+ */
954
+ __nv_fp8x4_storage_t __x;
955
+
956
+ /**
957
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
958
+ * Constructor by default.
959
+ */
960
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
961
+ __nv_fp8x4_e5m2() = default;
962
+ #else
963
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2() {}
964
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
965
+
966
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
967
+
968
+ /* Construct from wider types */
969
+
970
+ /**
971
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
972
+ * Constructor from a pair of \p __half2 data type values,
973
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
974
+ */
975
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __half2 flo,
976
+ const __half2 fhi) {
977
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
978
+ static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E5M2);
979
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
980
+ static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
981
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
982
+ }
983
+ /**
984
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
985
+ * Constructor from a pair of \p __nv_bfloat162 data type values,
986
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
987
+ */
988
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __nv_bfloat162 flo,
989
+ const __nv_bfloat162 fhi) {
990
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
991
+ static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E5M2);
992
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
993
+ static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
994
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
995
+ }
996
+ /**
997
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
998
+ * Constructor from \p float4 vector data type,
999
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1000
+ */
1001
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const float4 f) {
1002
+ const float2 flo = {f.x, f.y};
1003
+ const float2 fhi = {f.z, f.w};
1004
+ const __nv_fp8x2_storage_t rlo =
1005
+ __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
1006
+ const __nv_fp8x2_storage_t rhi =
1007
+ __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
1008
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1009
+ }
1010
+ /**
1011
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
1012
+ * Constructor from \p double4 vector data type,
1013
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1014
+ */
1015
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const double4 f) {
1016
+ const double2 flo = {f.x, f.y};
1017
+ const double2 fhi = {f.z, f.w};
1018
+ const __nv_fp8x2_storage_t rlo =
1019
+ __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
1020
+ const __nv_fp8x2_storage_t rhi =
1021
+ __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
1022
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1023
+ }
1024
+
1025
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1026
+ /* Widening converts */
1027
+
1028
+ /**
1029
+ * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
1030
+ * Conversion operator to \p float4 vector data type.
1031
+ */
1032
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
1033
+ const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
1034
+ const __nv_fp8x2_storage_t shi =
1035
+ static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
1036
+ float2 rlo = __internal_halfraw2_to_float2(
1037
+ __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E5M2));
1038
+ float2 rhi = __internal_halfraw2_to_float2(
1039
+ __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E5M2));
1040
+ float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
1041
+ return res;
1042
+ }
1043
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1044
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1045
+ };
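A comparable sketch for the four-wide variant, packing a float4 and widening it back (hypothetical kernel, shown only to illustrate the constructor and float4 operator above):

#include <cuda_fp8.h>

__global__ void float4_roundtrip_e5m2(const float4 *in, float4 *out) {
    __nv_fp8x4_e5m2 packed(in[0]);            // four saturating float -> fp8 conversions
    out[0] = static_cast<float4>(packed);     // unpack to float4 via __half2 intermediates
}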
1046
+
1047
+ /**
1048
+ * \defgroup CUDA_MATH_FP8_E4M3_STRUCT C++ struct for handling fp8 data type of e4m3 kind.
1049
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1050
+ */
1051
+
1052
+ /**
1053
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1054
+ * \brief __nv_fp8_e4m3 datatype
1055
+ *
1056
+ * \details This structure implements the datatype for storing
1057
+ * \p fp8 floating-point numbers of \p e4m3 kind:
1058
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1059
+ * The encoding doesn't support Infinity.
1060
+ * NaNs are limited to 0x7F and 0xFF values.
1061
+ *
1062
+ * The structure implements converting constructors and operators.
1063
+ */
1064
+ struct __CUDA_ALIGN__(1) __nv_fp8_e4m3 {
1065
+ public:
1066
+ /**
1067
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1068
+ * Storage variable contains the \p fp8 floating-point data.
1069
+ */
1070
+ __nv_fp8_storage_t __x;
1071
+
1072
+ /**
1073
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1074
+ * Constructor by default.
1075
+ */
1076
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1077
+ __nv_fp8_e4m3() = default;
1078
+ #else
1079
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3() {}
1080
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1081
+
1082
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1083
+
1084
+ /* Construct from wider FP types */
1085
+ /* Note we do avoid constructor init-list because of special host/device
1086
+ * compilation rules */
1087
+
1088
+ /**
1089
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1090
+ * Constructor from \p __half data type, relies on \p __NV_SATFINITE
1091
+ * behavior for out-of-range values.
1092
+ */
1093
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __half f) {
1094
+ __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
1095
+ __NV_SATFINITE, __NV_E4M3);
1096
+ }
1097
+ /**
1098
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1099
+ * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
1100
+ * behavior for out-of-range values.
1101
+ */
1102
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __nv_bfloat16 f) {
1103
+ __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
1104
+ __NV_SATFINITE, __NV_E4M3);
1105
+ }
1106
+ /**
1107
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1108
+ * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
1109
+ * for out-of-range values.
1110
+ */
1111
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const float f) {
1112
+ __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
1113
+ }
1114
+ /**
1115
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1116
+ * Constructor from \p double data type, relies on \p __NV_SATFINITE
1117
+ * behavior for out-of-range values.
1118
+ */
1119
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const double f) {
1120
+ __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
1121
+ }
1122
+
1123
+ /* Converts from integral */
1124
+
1125
+ /**
1126
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1127
+ * Constructor from \p unsigned \p short \p int data type, relies on \p
1128
+ * __NV_SATFINITE behavior for out-of-range values.
1129
+ */
1130
+ explicit __CUDA_HOSTDEVICE_FP8__
1131
+ __nv_fp8_e4m3(const unsigned short int val) {
1132
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1133
+ }
1134
+ /**
1135
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1136
+ * Constructor from \p unsigned \p int data type, relies on \p
1137
+ * __NV_SATFINITE behavior for out-of-range values.
1138
+ */
1139
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const unsigned int val) {
1140
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1141
+ }
1142
+ /**
1143
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1144
+ * Constructor from \p unsigned \p long \p long \p int data type, relies on
1145
+ * \p __NV_SATFINITE behavior for out-of-range values.
1146
+ */
1147
+ explicit __CUDA_HOSTDEVICE_FP8__
1148
+ __nv_fp8_e4m3(const unsigned long long int val) {
1149
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1150
+ }
1151
+
1152
+ /**
1153
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1154
+ * Constructor from \p short \p int data type, relies on \p
1155
+ * __NV_SATFINITE behavior for out-of-range values.
1156
+ */
1157
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const short int val) {
1158
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1159
+ }
1160
+ /**
1161
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1162
+ * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
1163
+ * for out-of-range values.
1164
+ */
1165
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const int val) {
1166
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1167
+ }
1168
+ /**
1169
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1170
+ * Constructor from \p long \p long \p int data type, relies on \p
1171
+ * __NV_SATFINITE behavior for out-of-range values.
1172
+ */
1173
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const long long int val) {
1174
+ __x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
1175
+ }
1176
+
1177
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1178
+ /* Widening FP converts */
1179
+ /**
1180
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1181
+ * Conversion operator to \p __half data type.
1182
+ */
1183
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
1184
+ return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
1185
+ }
1186
+ /**
1187
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1188
+ * Conversion operator to \p float data type.
1189
+ */
1190
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
1191
+ return __internal_halfraw_to_float(
1192
+ __nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
1193
+ }
1194
+ /**
1195
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1196
+ * Conversion operator to \p __nv_bfloat16 data type.
1197
+ */
1198
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
1199
+ return static_cast<__nv_bfloat16>(
1200
+ __internal_float_to_bf16raw_rz(float(*this)));
1201
+ }
1202
+ /**
1203
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1204
+ * Conversion operator to \p double data type.
1205
+ */
1206
+ explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
1207
+ return static_cast<double>(float(*this));
1208
+ }
1209
+
1210
+ /* Convert to integral */
1211
+
1212
+ /**
1213
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1214
+ * Conversion operator to \p unsigned \p char data type.
1215
+ * Clamps negative and too large inputs to the output range.
1216
+ * \p NaN inputs convert to \p zero.
1217
+ */
1218
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
1219
+ unsigned char i;
1220
+ const float f = float(*this);
1221
+ const unsigned char max_val = 0xFFU;
1222
+ const unsigned char min_val = 0U;
1223
+ const unsigned char bits = (*this).__x;
1224
+ // saturation fixup
1225
+ if ((bits & 0x7FU) == 0x7FU) {
1226
+ // NaN
1227
+ i = 0;
1228
+ } else if (f > static_cast<float>(max_val)) {
1229
+ // saturate maximum
1230
+ i = max_val;
1231
+ } else if (f < static_cast<float>(min_val)) {
1232
+ // saturate minimum
1233
+ i = min_val;
1234
+ } else {
1235
+ // normal value
1236
+ i = static_cast<unsigned char>(f);
1237
+ }
1238
+ return i;
1239
+ }
1240
+
1241
+ /**
1242
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1243
+ * Conversion operator to \p unsigned \p short \p int data type.
1244
+ * Clamps negative inputs to zero.
1245
+ * \p NaN inputs convert to \p zero.
1246
+ */
1247
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
1248
+ return __half2ushort_rz(__half(*this));
1249
+ }
1250
+ /**
1251
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1252
+ * Conversion operator to \p unsigned \p int data type.
1253
+ * Clamps negative inputs to zero.
1254
+ * \p NaN inputs convert to \p zero.
1255
+ */
1256
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
1257
+ return __half2uint_rz(__half(*this));
1258
+ }
1259
+ /**
1260
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1261
+ * Conversion operator to \p unsigned \p long \p long \p int data type.
1262
+ * Clamps negative inputs to zero.
1263
+ * \p NaN inputs convert to \p 0x8000000000000000ULL.
1264
+ */
1265
+ explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
1266
+ return __half2ull_rz(__half(*this));
1267
+ }
1268
+
1269
+ /**
1270
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1271
+ * Conversion operator to \p signed \p char data type.
1272
+ * Clamps too large inputs to the output range.
1273
+ * \p NaN inputs convert to \p zero.
1274
+ */
1275
+ explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
1276
+ signed char i;
1277
+ const float f = float(*this);
1278
+ const signed char max_val = (signed char)0x7FU;
1279
+ const signed char min_val = (signed char)0x80U;
1280
+ const unsigned char bits = (*this).__x;
1281
+ // saturation fixup
1282
+ if ((bits & 0x7FU) == 0x7FU) {
1283
+ // NaN
1284
+ i = 0;
1285
+ } else if (f > static_cast<float>(max_val)) {
1286
+ // saturate maximum
1287
+ i = max_val;
1288
+ } else if (f < static_cast<float>(min_val)) {
1289
+ // saturate minimum
1290
+ i = min_val;
1291
+ } else {
1292
+ // normal value
1293
+ i = static_cast<signed char>(f);
1294
+ }
1295
+ return i;
1296
+ }
1297
+ /**
1298
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1299
+ * Conversion operator to \p short \p int data type.
1300
+ * \p NaN inputs convert to \p zero.
1301
+ */
1302
+ explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
1303
+ return __half2short_rz(__half(*this));
1304
+ }
1305
+ /**
1306
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1307
+ * Conversion operator to \p int data type.
1308
+ * \p NaN inputs convert to \p zero.
1309
+ */
1310
+ explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
1311
+ return __half2int_rz(__half(*this));
1312
+ }
1313
+ /**
1314
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1315
+ * Conversion operator to \p long \p long \p int data type.
1316
+ * \p NaN inputs convert to \p 0x8000000000000000LL.
1317
+ */
1318
+ explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
1319
+ return __half2ll_rz(__half(*this));
1320
+ }
1321
+
1322
+ /**
1323
+ * \ingroup CUDA_MATH_FP8_E4M3_STRUCT
1324
+ * Conversion operator to \p bool data type.
1325
+ * +0 and -0 inputs convert to \p false.
1326
+ * Non-zero inputs convert to \p true.
1327
+ */
1328
+ explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
1329
+ return (__x & 0x7FU) != 0U;
1330
+ }
1331
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1332
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1333
+ };
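Because e4m3 has no Infinity encoding, __NV_SATFINITE clamps large magnitudes to the largest finite value (448 for e4m3). A small host-side sketch with hypothetical values:

#include <cuda_fp8.h>
#include <cstdio>

int main() {
    __nv_fp8_e4m3 big(1.0e6f);               // saturates instead of overflowing
    std::printf("1e6 -> %f\n", float(big));  // expected output: 448.000000
    return 0;
}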
1334
+
1335
+ /**
1336
+ * \defgroup CUDA_MATH_FP8X2_E4M3_STRUCT C++ struct for handling vector type of two fp8 values of e4m3 kind.
1337
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1338
+ */
1339
+
1340
+ /**
1341
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1342
+ * \brief __nv_fp8x2_e4m3 datatype
1343
+ *
1344
+ * \details This structure implements the datatype for storage
1345
+ * and operations on the vector of two \p fp8 values of \p e4m3 kind each:
1346
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1347
+ * The encoding doesn't support Infinity.
1348
+ * NaNs are limited to 0x7F and 0xFF values.
1349
+ */
1350
+ struct __CUDA_ALIGN__(2) __nv_fp8x2_e4m3 {
1351
+ public:
1352
+ /**
1353
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1354
+ * Storage variable contains the vector of two \p fp8 floating-point data
1355
+ * values.
1356
+ */
1357
+ __nv_fp8x2_storage_t __x;
1358
+
1359
+ /**
1360
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1361
+ * Constructor by default.
1362
+ */
1363
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1364
+ __nv_fp8x2_e4m3() = default;
1365
+ #else
1366
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3() {}
1367
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1368
+
1369
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1370
+
1371
+ /* Construct from wider types */
1372
+
1373
+ /**
1374
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1375
+ * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
1376
+ * behavior for out-of-range values.
1377
+ */
1378
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __half2 f) {
1379
+ __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
1380
+ __NV_SATFINITE, __NV_E4M3);
1381
+ }
1382
+ /**
1383
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1384
+ * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
1385
+ * behavior for out-of-range values.
1386
+ */
1387
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __nv_bfloat162 f) {
1388
+ __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
1389
+ __NV_SATFINITE, __NV_E4M3);
1390
+ }
1391
+ /**
1392
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1393
+ * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
1394
+ * behavior for out-of-range values.
1395
+ */
1396
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const float2 f) {
1397
+ __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
1398
+ }
1399
+ /**
1400
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1401
+ * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
1402
+ * behavior for out-of-range values.
1403
+ */
1404
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const double2 f) {
1405
+ __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
1406
+ }
1407
+
1408
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1409
+ /* Widening converts */
1410
+ /**
1411
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1412
+ * Conversion operator to \p __half2 data type.
1413
+ */
1414
+ explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
1415
+ return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
1416
+ }
1417
+ /**
1418
+ * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
1419
+ * Conversion operator to \p float2 data type.
1420
+ */
1421
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
1422
+ return __internal_halfraw2_to_float2(
1423
+ __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
1424
+ }
1425
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1426
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1427
+ };
1428
+
1429
+ /**
1430
+ * \defgroup CUDA_MATH_FP8X4_E4M3_STRUCT C++ struct for handling vector type of four fp8 values of e4m3 kind.
1431
+ * \ingroup CUDA_MATH_INTRINSIC_FP8
1432
+ */
1433
+
1434
+ /**
1435
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1436
+ * \brief __nv_fp8x4_e4m3 datatype
1437
+ *
1438
+ * \details This structure implements the datatype for storage
1439
+ * and operations on the vector of four \p fp8 values of \p e4m3 kind each:
1440
+ * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
1441
+ * The encoding doesn't support Infinity.
1442
+ * NaNs are limited to 0x7F and 0xFF values.
1443
+ */
1444
+ struct __CUDA_ALIGN__(4) __nv_fp8x4_e4m3 {
1445
+ public:
1446
+ /**
1447
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1448
+ * Storage variable contains the vector of four \p fp8 floating-point data
1449
+ * values.
1450
+ */
1451
+ __nv_fp8x4_storage_t __x;
1452
+
1453
+ /**
1454
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1455
+ * Constructor by default.
1456
+ */
1457
+ #if defined(__CPP_VERSION_AT_LEAST_11_FP8)
1458
+ __nv_fp8x4_e4m3() = default;
1459
+ #else
1460
+ __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3() {}
1461
+ #endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
1462
+
1463
+ #if !defined(__CUDA_NO_FP8_CONVERSIONS__)
1464
+
1465
+ /* Construct from wider types */
1466
+
1467
+ /**
1468
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1469
+ * Constructor from a pair of \p __half2 data type values,
1470
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1471
+ */
1472
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __half2 flo,
1473
+ const __half2 fhi) {
1474
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
1475
+ static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E4M3);
1476
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
1477
+ static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
1478
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1479
+ }
1480
+ /**
1481
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1482
+ * Constructor from a pair of \p __nv_bfloat162 data type values,
1483
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1484
+ */
1485
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __nv_bfloat162 flo,
1486
+ const __nv_bfloat162 fhi) {
1487
+ const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
1488
+ static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E4M3);
1489
+ const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
1490
+ static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
1491
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1492
+ }
1493
+ /**
1494
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1495
+ * Constructor from \p float4 vector data type,
1496
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1497
+ */
1498
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const float4 f) {
1499
+ const float2 flo = {f.x, f.y};
1500
+ const float2 fhi = {f.z, f.w};
1501
+ const __nv_fp8x2_storage_t rlo =
1502
+ __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
1503
+ const __nv_fp8x2_storage_t rhi =
1504
+ __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
1505
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1506
+ }
1507
+ /**
1508
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1509
+ * Constructor from \p double4 vector data type,
1510
+ * relies on \p __NV_SATFINITE behavior for out-of-range values.
1511
+ */
1512
+ explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const double4 f) {
1513
+ const double2 flo = {f.x, f.y};
1514
+ const double2 fhi = {f.z, f.w};
1515
+ const __nv_fp8x2_storage_t rlo =
1516
+ __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
1517
+ const __nv_fp8x2_storage_t rhi =
1518
+ __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
1519
+ __x = __internal_pack_u16x2_to_u32(rlo, rhi);
1520
+ }
1521
+
1522
+ #if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
1523
+ /* Widening converts */
1524
+
1525
+ /**
1526
+ * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
1527
+ * Conversion operator to \p float4 vector data type.
1528
+ */
1529
+ explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
1530
+ const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
1531
+ const __nv_fp8x2_storage_t shi =
1532
+ static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
1533
+ float2 rlo = __internal_halfraw2_to_float2(
1534
+ __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E4M3));
1535
+ float2 rhi = __internal_halfraw2_to_float2(
1536
+ __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E4M3));
1537
+ float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
1538
+ return res;
1539
+ }
1540
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
1541
+ #endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
1542
+ };
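The pair constructors above can also be fed bfloat16 data directly; a hypothetical device-side sketch (function name and values invented for illustration):

#include <cuda_fp8.h>
#include <cuda_bf16.h>

__global__ void pack_bf16_pairs_e4m3(__nv_fp8x4_e4m3 *out) {
    const __nv_bfloat162 lo = __floats2bfloat162_rn(0.5f, 1.5f);
    const __nv_bfloat162 hi = __floats2bfloat162_rn(2.5f, 3.5f);
    out[0] = __nv_fp8x4_e4m3(lo, hi);   // lo lands in the low 16 bits, hi in the high 16 bits
}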
1543
+
1544
+ #endif /* defined(__cplusplus) */
1545
+
1546
+ #endif /* end of include guard: __CUDA_FP8_HPP__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h ADDED
@@ -0,0 +1,224 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_H_
51
+ # define _CUDA_PIPELINE_H_
52
+
53
+ # include "cuda_pipeline_primitives.h"
54
+
55
+ # if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
56
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
57
+ -std=c++11 compiler option.
58
+ # endif
59
+
60
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
61
+ # include "cuda_awbarrier.h"
62
+ # endif
63
+
64
+ // Integration with libcu++'s cuda::barrier<cuda::thread_scope_block>.
65
+
66
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
67
+ # if defined(_LIBCUDACXX_CUDA_ABI_VERSION)
68
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION
69
+ # else
70
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4
71
+ # endif
72
+
73
+ # define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y
74
+ # define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y)
75
+ # define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION)
76
+
77
+ namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE {
78
+ struct __block_scope_barrier_base;
79
+ }}
80
+
81
+ # endif
82
+
83
+ _CUDA_PIPELINE_BEGIN_NAMESPACE
84
+
85
+ template<size_t N, typename T>
86
+ _CUDA_PIPELINE_QUALIFIER
87
+ auto segment(T* ptr) -> T(*)[N];
88
+
89
+ class pipeline {
90
+ public:
91
+ pipeline(const pipeline&) = delete;
92
+ pipeline(pipeline&&) = delete;
93
+ pipeline& operator=(const pipeline&) = delete;
94
+ pipeline& operator=(pipeline&&) = delete;
95
+
96
+ _CUDA_PIPELINE_QUALIFIER pipeline();
97
+ _CUDA_PIPELINE_QUALIFIER size_t commit();
98
+ _CUDA_PIPELINE_QUALIFIER void commit_and_wait();
99
+ _CUDA_PIPELINE_QUALIFIER void wait(size_t batch);
100
+ template<unsigned N>
101
+ _CUDA_PIPELINE_QUALIFIER void wait_prior();
102
+
103
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
104
+ _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier);
105
+ _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier);
106
+ # endif
107
+
108
+ private:
109
+ size_t current_batch;
110
+ };
111
+
112
+ template<class T>
113
+ _CUDA_PIPELINE_QUALIFIER
114
+ void memcpy_async(T& dst, const T& src, pipeline& pipe);
115
+
116
+ template<class T, size_t DstN, size_t SrcN>
117
+ _CUDA_PIPELINE_QUALIFIER
118
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe);
119
+
120
+ template<size_t N, typename T>
121
+ _CUDA_PIPELINE_QUALIFIER
122
+ auto segment(T* ptr) -> T(*)[N]
123
+ {
124
+ return (T(*)[N])ptr;
125
+ }
126
+
127
+ _CUDA_PIPELINE_QUALIFIER
128
+ pipeline::pipeline()
129
+ : current_batch(0)
130
+ {
131
+ }
132
+
133
+ _CUDA_PIPELINE_QUALIFIER
134
+ size_t pipeline::commit()
135
+ {
136
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
137
+ return this->current_batch++;
138
+ }
139
+
140
+ _CUDA_PIPELINE_QUALIFIER
141
+ void pipeline::commit_and_wait()
142
+ {
143
+ (void)pipeline::commit();
144
+ pipeline::wait_prior<0>();
145
+ }
146
+
147
+ _CUDA_PIPELINE_QUALIFIER
148
+ void pipeline::wait(size_t batch)
149
+ {
150
+ const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0;
151
+
152
+ switch (prior) {
153
+ case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break;
154
+ case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break;
155
+ case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break;
156
+ case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break;
157
+ case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break;
158
+ case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break;
159
+ case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break;
160
+ case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break;
161
+ default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break;
162
+ }
163
+ }
164
+
165
+ template<unsigned N>
166
+ _CUDA_PIPELINE_QUALIFIER
167
+ void pipeline::wait_prior()
168
+ {
169
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<N>();
170
+ }
171
+
172
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
173
+ _CUDA_PIPELINE_QUALIFIER
174
+ void pipeline::arrive_on(awbarrier& barrier)
175
+ {
176
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier);
177
+ }
178
+
179
+ _CUDA_PIPELINE_QUALIFIER
180
+ void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier)
181
+ {
182
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast<uint64_t *>(&barrier));
183
+ }
184
+ # endif
185
+
186
+ template<class T>
187
+ _CUDA_PIPELINE_QUALIFIER
188
+ void memcpy_async(T& dst, const T& src, pipeline& pipe)
189
+ {
190
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&src) & (alignof(T) - 1)));
191
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&dst) & (alignof(T) - 1)));
192
+
193
+ if (__is_trivially_copyable(T)) {
194
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed<sizeof(T), alignof(T)>(
195
+ reinterpret_cast<void*>(&dst), reinterpret_cast<const void*>(&src));
196
+ } else {
197
+ dst = src;
198
+ }
199
+ }
200
+
201
+ template<class T, size_t DstN, size_t SrcN>
202
+ _CUDA_PIPELINE_QUALIFIER
203
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe)
204
+ {
205
+ constexpr size_t dst_size = sizeof(*dst);
206
+ constexpr size_t src_size = sizeof(*src);
207
+ static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size.");
208
+ static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size.");
209
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (dst_size - 1)));
210
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (dst_size - 1)));
211
+
212
+ if (__is_trivially_copyable(T)) {
213
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict<sizeof(*dst), sizeof(*src)>(
214
+ reinterpret_cast<void*>(*dst), reinterpret_cast<const void*>(*src));
215
+ } else {
216
+ for (size_t i = 0; i < DstN; ++i) {
217
+ (*dst)[i] = (i < SrcN) ? (*src)[i] : T();
218
+ }
219
+ }
220
+ }
221
+
222
+ _CUDA_PIPELINE_END_NAMESPACE
223
+
224
+ #endif /* !_CUDA_PIPELINE_H_ */
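A minimal kernel sketch of the pipeline API defined above, staging one int per thread through shared memory (kernel name is hypothetical; assumes -std=c++11 or newer and a dynamic shared-memory launch of blockDim.x * sizeof(int) bytes):

#include <cuda_pipeline.h>

__global__ void stage_through_shared(int *out, const int *in, size_t n) {
    extern __shared__ int smem[];
    nvcuda::experimental::pipeline pipe;
    const size_t idx = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (idx < n) {
        // Queue a global->shared copy, then commit the batch and wait for it.
        nvcuda::experimental::memcpy_async(smem[threadIdx.x], in[idx], pipe);
        pipe.commit_and_wait();
        out[idx] = smem[threadIdx.x];
    }
}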
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h ADDED
@@ -0,0 +1,373 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_HELPERS_H_
51
+ # define _CUDA_PIPELINE_HELPERS_H_
52
+
53
+ # define _CUDA_PIPELINE_NAMESPACE nvcuda::experimental
54
+ # define _CUDA_PIPELINE_BEGIN_NAMESPACE namespace nvcuda { namespace experimental {
55
+ # define _CUDA_PIPELINE_END_NAMESPACE } }
56
+
57
+ # define _CUDA_PIPELINE_INTERNAL_NAMESPACE _CUDA_PIPELINE_NAMESPACE::__pipeline_internal
58
+ # define _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE _CUDA_PIPELINE_BEGIN_NAMESPACE namespace __pipeline_internal {
59
+ # define _CUDA_PIPELINE_END_INTERNAL_NAMESPACE } _CUDA_PIPELINE_END_NAMESPACE
60
+
61
+ # if !defined(_CUDA_PIPELINE_QUALIFIER)
62
+ # define _CUDA_PIPELINE_QUALIFIER inline __device__
63
+ # endif
64
+ # if !defined(_CUDA_PIPELINE_STATIC_QUALIFIER)
65
+ # define _CUDA_PIPELINE_STATIC_QUALIFIER static inline __device__
66
+ # endif
67
+
68
+ # if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
69
+ # define _CUDA_PIPELINE_ARCH_700_OR_LATER
70
+ # endif
71
+
72
+ # if (__CUDA_ARCH__ >= 800)
73
+ # define _CUDA_PIPELINE_HAS_ASYNC_COPY 1
74
+ # else
75
+ # define _CUDA_PIPELINE_HAS_ASYNC_COPY 0
76
+ # endif
77
+
78
+ # if !defined(_CUDA_PIPELINE_MAX_STAGES)
79
+ # define _CUDA_PIPELINE_MAX_STAGES 8
80
+ # endif
81
+
82
+ # if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900)))
83
+ # define _CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER
84
+ # endif
85
+
86
+ # if !defined(_CUDA_PIPELINE_DEBUG)
87
+ # if defined(__CUDACC_DEBUG__)
88
+ # define _CUDA_PIPELINE_DEBUG 1
89
+ # else
90
+ # define _CUDA_PIPELINE_DEBUG 0
91
+ # endif
92
+ # endif
93
+
94
+ # if defined(_CUDA_PIPELINE_DEBUG) && (_CUDA_PIPELINE_DEBUG == 1) && !defined(NDEBUG)
95
+ # if !defined(__CUDACC_RTC__)
96
+ # include <cassert>
97
+ # endif
98
+ # define _CUDA_PIPELINE_ASSERT(x) assert((x));
99
+ # define _CUDA_PIPELINE_ABORT() assert(0);
100
+ # else
101
+ # define _CUDA_PIPELINE_ASSERT(x)
102
+ # define _CUDA_PIPELINE_ABORT() __trap();
103
+ # endif
104
+
105
+ # if defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
106
+ # define _CUDA_PIPELINE_STATIC_ASSERT(c, m) static_assert(c, m)
107
+ # else
108
+ # define _CUDA_PIPELINE_STATIC_ASSERT(c, m)
109
+ # endif
110
+
111
+ # if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__)
112
+ # define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "r"
113
+ # else
114
+ # define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "l"
115
+ # endif
116
+
117
+ # if defined(__CUDACC_RTC__)
118
+ typedef unsigned int uint32_t;
119
+ typedef unsigned long long uint64_t;
120
+ typedef uint64_t uintptr_t;
121
+ # else
122
+ # include <stdint.h>
123
+ # endif
124
+
125
+ _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE
126
+
127
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(short) == 2, "Size mismatch for type 'short'");
128
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(int) == 4, "Size mismatch for type 'int'");
129
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(int2) == 8, "Size mismatch for type 'int2'");
130
+ _CUDA_PIPELINE_STATIC_ASSERT(sizeof(int4) == 16, "Size mismatch for type 'int4'");
131
+
132
+ extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *);
133
+
134
+ template<size_t CopySize, size_t SourceSize>
135
+ _CUDA_PIPELINE_QUALIFIER
136
+ void pipeline_memcpy_sync(void* __restrict__ dst, const void* __restrict__ src)
137
+ {
138
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
139
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
140
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
141
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
142
+
143
+ char* const d = reinterpret_cast<char*>(dst);
144
+ const char* const s = reinterpret_cast<const char*>(src);
145
+
146
+ size_t copy_step_size;
147
+ if (SourceSize == 0) {
148
+ copy_step_size = CopySize;
149
+ } else if (SourceSize == 2 || SourceSize == 4 || SourceSize == 8 || SourceSize == 16) {
150
+ copy_step_size = SourceSize;
151
+ } else {
152
+ copy_step_size = 1;
153
+ }
154
+
155
+ for (size_t i = 0; i < CopySize; i += copy_step_size) {
156
+ const bool copy_source = SourceSize && (i < SourceSize);
157
+
158
+ switch (copy_step_size) {
159
+ case 1:
160
+ d[i] = copy_source ? s[i] : char();
161
+ break;
162
+ case 2:
163
+ *reinterpret_cast<short*>(d + i) = copy_source ? *reinterpret_cast<const short*>(s + i) : short();
164
+ break;
165
+ case 4:
166
+ *reinterpret_cast<int*>(d + i) = copy_source ? *reinterpret_cast<const int*>(s + i) : int();
167
+ break;
168
+ case 8:
169
+ *reinterpret_cast<int2*>(d + i) = copy_source ? *reinterpret_cast<const int2*>(s + i) : int2();
170
+ break;
171
+ case 16:
172
+ *reinterpret_cast<int4*>(d + i) = copy_source ? *reinterpret_cast<const int4*>(s + i) : int4();
173
+ break;
174
+ }
175
+ }
176
+ }
177
+
178
+ template<bool UseHwAsyncCopy>
179
+ struct ImplementationChooser;
180
+
181
+ template<>
182
+ struct ImplementationChooser<true> {
183
+ template<size_t CopySize, size_t SourceSize>
184
+ struct CpAsyncChooser {
185
+ _CUDA_PIPELINE_STATIC_QUALIFIER
186
+ void cp_async(void* __restrict__ dst, const void* __restrict__ src)
187
+ {
188
+ asm volatile ("cp.async.ca.shared.global [%0], [%1], %2, %3;"
189
+ :
190
+ : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(CopySize),
191
+ "n"(SourceSize)
192
+ : "memory");
193
+ }
194
+ };
195
+
196
+ template<size_t SourceSize>
197
+ struct CpAsyncChooser<16, SourceSize> {
198
+ _CUDA_PIPELINE_STATIC_QUALIFIER
199
+ void cp_async(void* __restrict__ dst, const void* __restrict__ src)
200
+ {
201
+ asm volatile ("cp.async.cg.shared.global [%0], [%1], %2, %3;"
202
+ :
203
+ : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(16), "n"(SourceSize)
204
+ : "memory");
205
+ }
206
+ };
207
+
208
+ template<size_t CopySize, size_t SourceSize>
209
+ _CUDA_PIPELINE_STATIC_QUALIFIER
210
+ void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
211
+ {
212
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
213
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
214
+ _CUDA_PIPELINE_ASSERT(__isShared(dst));
215
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src));
216
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
217
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
218
+
219
+ CpAsyncChooser<CopySize, SourceSize>::cp_async(dst, src);
220
+ }
221
+
222
+ _CUDA_PIPELINE_STATIC_QUALIFIER
223
+ void pipeline_commit()
224
+ {
225
+ asm volatile ("cp.async.commit_group;");
226
+ }
227
+
228
+ template<unsigned N>
229
+ _CUDA_PIPELINE_STATIC_QUALIFIER
230
+ void pipeline_wait_prior()
231
+ {
232
+ asm volatile ("cp.async.wait_group %0;"
233
+ :
234
+ : "n"(N < _CUDA_PIPELINE_MAX_STAGES ? N : _CUDA_PIPELINE_MAX_STAGES));
235
+ }
236
+
237
+ _CUDA_PIPELINE_STATIC_QUALIFIER
238
+ void pipeline_arrive_on(uint64_t* barrier)
239
+ {
240
+ _CUDA_PIPELINE_ASSERT(__isShared(barrier));
241
+
242
+ asm volatile ("cp.async.mbarrier.arrive.shared.b64 [%0];"
243
+ :
244
+ : "r"(__nvvm_get_smem_pointer(barrier)));
245
+ }
246
+ };
247
+
248
+ template<>
249
+ struct ImplementationChooser<false> {
250
+ template<size_t CopySize, size_t SourceSize>
251
+ _CUDA_PIPELINE_STATIC_QUALIFIER
252
+ void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
253
+ {
254
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
255
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
256
+ _CUDA_PIPELINE_ASSERT(__isShared(dst));
257
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src));
258
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
259
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
260
+
261
+ pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
262
+ }
263
+
264
+ _CUDA_PIPELINE_STATIC_QUALIFIER
265
+ void pipeline_commit()
266
+ {
267
+ }
268
+
269
+ template<unsigned N>
270
+ _CUDA_PIPELINE_STATIC_QUALIFIER
271
+ void pipeline_wait_prior()
272
+ {
273
+ }
274
+
275
+ _CUDA_PIPELINE_STATIC_QUALIFIER
276
+ void pipeline_arrive_on(uint64_t* barrier)
277
+ {
278
+ }
279
+ };
280
+
281
+ template<size_t CopySize, size_t SourceSize>
282
+ _CUDA_PIPELINE_QUALIFIER
283
+ void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
284
+ {
285
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
286
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
287
+ _CUDA_PIPELINE_ASSERT(__isShared(dst));
288
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src));
289
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
290
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
291
+
292
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
293
+ }
294
+
295
+ _CUDA_PIPELINE_QUALIFIER
296
+ void pipeline_commit()
297
+ {
298
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_commit();
299
+ }
300
+
301
+ template<unsigned N>
302
+ _CUDA_PIPELINE_QUALIFIER
303
+ void pipeline_wait_prior()
304
+ {
305
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_wait_prior<N>();
306
+ }
307
+
308
+ _CUDA_PIPELINE_QUALIFIER
309
+ void pipeline_arrive_on(uint64_t* barrier)
310
+ {
311
+ ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_arrive_on(barrier);
312
+ }
313
+
314
+ template<size_t CopySize, size_t SourceSize>
315
+ _CUDA_PIPELINE_QUALIFIER
316
+ void pipeline_copy_strict(void* __restrict__ dst, const void* __restrict__ src)
317
+ {
318
+ _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
319
+ _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size.");
320
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
321
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
322
+
323
+ if (__isGlobal(src) && __isShared(dst)) {
324
+ pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
325
+ } else {
326
+ pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
327
+ }
328
+ }
329
+
330
+ template<size_t CopySize, size_t Align>
331
+ _CUDA_PIPELINE_QUALIFIER
332
+ void pipeline_copy_relaxed(void* __restrict__ dst, const void* __restrict__ src)
333
+ {
334
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (Align - 1)));
335
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (Align - 1)));
336
+
337
+ const char* s = reinterpret_cast<const char*>(src);
338
+ char* d = reinterpret_cast<char*>(dst);
339
+ size_t remaining = CopySize;
340
+
341
+ while (remaining) {
342
+ if ((Align >= 16) && (remaining >= 16)) {
343
+ pipeline_copy_strict<16, 16>(dst, src);
344
+ d += 16;
345
+ s += 16;
346
+ remaining -= 16;
347
+ } else if ((Align >= 8) && (remaining >= 8)) {
348
+ pipeline_copy_strict<8, 8>(dst, src);
349
+ d += 8;
350
+ s += 8;
351
+ remaining -= 8;
352
+ } else if ((Align >= 4) && (remaining >= 4)) {
353
+ pipeline_copy_strict<4, 4>(dst, src);
354
+ d += 4;
355
+ s += 4;
356
+ remaining -= 4;
357
+ } else if ((Align >= 2) && (remaining >= 2)) {
358
+ *reinterpret_cast<short*>(d) = *reinterpret_cast<const short*>(s);
359
+ d += 2;
360
+ s += 2;
361
+ remaining -= 2;
362
+ } else {
363
+ *d = *s;
364
+ d += 1;
365
+ s += 1;
366
+ remaining -= 1;
367
+ }
368
+ }
369
+ }
370
+
371
+ _CUDA_PIPELINE_END_INTERNAL_NAMESPACE
372
+
373
+ #endif /* !_CUDA_PIPELINE_HELPERS_H_ */
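The synchronous fallback defined above, pipeline_memcpy_sync<CopySize, SourceSize>, copies the first SourceSize bytes from the source and zero-fills the remaining CopySize - SourceSize bytes of the destination. A minimal host-side analogue of that zero-fill behaviour, for illustration only (memcpy_zfill is a hypothetical name, not part of this header):

#include <cstddef>
#include <cstring>

// Hypothetical sketch of the zero-fill copy semantics used by the helpers above.
template<std::size_t CopySize, std::size_t SourceSize>
void memcpy_zfill(void* dst, const void* src)
{
    static_assert(SourceSize <= CopySize, "source must fit inside the copy");
    std::memcpy(dst, src, SourceSize);                      // bytes taken from the source
    std::memset(static_cast<char*>(dst) + SourceSize, 0,    // the tail is zero-filled
                CopySize - SourceSize);
}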
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h ADDED
@@ -0,0 +1,148 @@
1
+ /*
2
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDA_PIPELINE_PRIMITIVES_H_
51
+ # define _CUDA_PIPELINE_PRIMITIVES_H_
52
+
53
+ # include "cuda_pipeline_helpers.h"
54
+
55
+ _CUDA_PIPELINE_STATIC_QUALIFIER
56
+ void __pipeline_memcpy_async(void* __restrict__ dst_shared, const void* __restrict__ src_global, size_t size_and_align,
57
+ size_t zfill = 0)
58
+ {
59
+ _CUDA_PIPELINE_ASSERT(size_and_align == 4 || size_and_align == 8 || size_and_align == 16);
60
+ _CUDA_PIPELINE_ASSERT(zfill <= size_and_align);
61
+ _CUDA_PIPELINE_ASSERT(__isShared(dst_shared));
62
+ _CUDA_PIPELINE_ASSERT(__isGlobal(src_global));
63
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst_shared) & (size_and_align - 1)));
64
+ _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src_global) & (size_and_align - 1)));
65
+
66
+ switch (size_and_align) {
67
+ case 16:
68
+ switch (zfill) {
69
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 16>(dst_shared, src_global); return;
70
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 15>(dst_shared, src_global); return;
71
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 14>(dst_shared, src_global); return;
72
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 13>(dst_shared, src_global); return;
73
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 12>(dst_shared, src_global); return;
74
+ case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 11>(dst_shared, src_global); return;
75
+ case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 10>(dst_shared, src_global); return;
76
+ case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 9>(dst_shared, src_global); return;
77
+ case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 8>(dst_shared, src_global); return;
78
+ case 9: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 7>(dst_shared, src_global); return;
79
+ case 10: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 6>(dst_shared, src_global); return;
80
+ case 11: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 5>(dst_shared, src_global); return;
81
+ case 12: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 4>(dst_shared, src_global); return;
82
+ case 13: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 3>(dst_shared, src_global); return;
83
+ case 14: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 2>(dst_shared, src_global); return;
84
+ case 15: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 1>(dst_shared, src_global); return;
85
+ case 16: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 0>(dst_shared, src_global); return;
86
+ default: _CUDA_PIPELINE_ABORT(); return;
87
+ }
88
+ case 8:
89
+ switch (zfill) {
90
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 8>(dst_shared, src_global); return;
91
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 7>(dst_shared, src_global); return;
92
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 6>(dst_shared, src_global); return;
93
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 5>(dst_shared, src_global); return;
94
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 4>(dst_shared, src_global); return;
95
+ case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 3>(dst_shared, src_global); return;
96
+ case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 2>(dst_shared, src_global); return;
97
+ case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 1>(dst_shared, src_global); return;
98
+ case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 0>(dst_shared, src_global); return;
99
+ default: _CUDA_PIPELINE_ABORT(); return;
100
+ }
101
+ case 4:
102
+ switch (zfill) {
103
+ case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 4>(dst_shared, src_global); return;
104
+ case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 3>(dst_shared, src_global); return;
105
+ case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 2>(dst_shared, src_global); return;
106
+ case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 1>(dst_shared, src_global); return;
107
+ case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 0>(dst_shared, src_global); return;
108
+ default: _CUDA_PIPELINE_ABORT(); return;
109
+ }
110
+ default:
111
+ _CUDA_PIPELINE_ABORT();
112
+ return;
113
+ }
114
+ }
115
+
116
+ _CUDA_PIPELINE_STATIC_QUALIFIER
117
+ void __pipeline_commit()
118
+ {
119
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
120
+ }
121
+
122
+ _CUDA_PIPELINE_STATIC_QUALIFIER
123
+ void __pipeline_wait_prior(size_t prior)
124
+ {
125
+ switch (prior) {
126
+ case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); return;
127
+ case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); return;
128
+ case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); return;
129
+ case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); return;
130
+ case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); return;
131
+ case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); return;
132
+ case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); return;
133
+ case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); return;
134
+ default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); return;
135
+ }
136
+ }
137
+
138
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
139
+ # include "cuda_awbarrier_primitives.h"
140
+
141
+ _CUDA_PIPELINE_STATIC_QUALIFIER
142
+ void __pipeline_arrive_on(__mbarrier_t* barrier)
143
+ {
144
+ _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(barrier);
145
+ }
146
+ # endif
147
+
148
+ #endif /* !_CUDA_PIPELINE_PRIMITIVES_H_ */
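A minimal usage sketch of the primitives declared above: a single-stage copy of one float4 per thread, staged through dynamic shared memory. The kernel name, data type and launch geometry are illustrative, not part of the header being added.

#include <cuda_pipeline_primitives.h>

__global__ void copy_tile(const float4* __restrict__ src, float4* __restrict__ dst)
{
    extern __shared__ float4 tile[];                         // dynamic shared-memory staging buffer
    const unsigned i = blockIdx.x * blockDim.x + threadIdx.x;

    __pipeline_memcpy_async(&tile[threadIdx.x], &src[i], sizeof(float4));  // 16-byte global->shared copy
    __pipeline_commit();                                     // close the current batch of async copies
    __pipeline_wait_prior(0);                                // wait for every committed batch
    __syncthreads();

    dst[i] = tile[threadIdx.x];                              // consume the staged data
}

A launch would pass the staging size as dynamic shared memory, e.g. copy_tile<<<grid, block, block.x * sizeof(float4), stream>>>(src, dst).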
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h ADDED
@@ -0,0 +1,2300 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_RUNTIME_H__)
51
+ #define __CUDA_RUNTIME_H__
52
+
53
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
54
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
55
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__
56
+ #endif
57
+
58
+ #if !defined(__CUDACC_RTC__)
59
+ #if defined(__GNUC__)
60
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
61
+ #pragma GCC diagnostic push
62
+ #endif
63
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)))
64
+ #pragma GCC diagnostic ignored "-Wunused-function"
65
+ #endif
66
+ #elif defined(_MSC_VER)
67
+ #pragma warning(push)
68
+ #pragma warning(disable: 4820)
69
+ #endif
70
+ #endif
71
+
72
+ #ifdef __QNX__
73
+ #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
74
+ typedef unsigned size_t;
75
+ #endif
76
+ #endif
77
+ /*******************************************************************************
78
+ * *
79
+ * *
80
+ * *
81
+ *******************************************************************************/
82
+
83
+ #include "crt/host_config.h"
84
+
85
+ /*******************************************************************************
86
+ * *
87
+ * *
88
+ * *
89
+ *******************************************************************************/
90
+
91
+ #include "builtin_types.h"
92
+ #include "library_types.h"
93
+ #if !defined(__CUDACC_RTC__)
94
+ #define EXCLUDE_FROM_RTC
95
+ #include "channel_descriptor.h"
96
+ #include "cuda_runtime_api.h"
97
+ #include "driver_functions.h"
98
+ #undef EXCLUDE_FROM_RTC
99
+ #endif /* !__CUDACC_RTC__ */
100
+ #include "crt/host_defines.h"
101
+ #ifdef __CUDACC_RTC__
102
+ #include "target"
103
+ #endif /* defined(__CUDACC_RTC__) */
104
+
105
+
106
+ #include "vector_functions.h"
107
+
108
+ #if defined(__CUDACC__)
109
+
110
+ #if defined(__CUDACC_RTC__)
111
+ #include "nvrtc_device_runtime.h"
112
+ #include "crt/device_functions.h"
113
+ #include "crt/common_functions.h"
114
+ #include "device_launch_parameters.h"
115
+
116
+ #else /* !__CUDACC_RTC__ */
117
+ #define EXCLUDE_FROM_RTC
118
+ #include "crt/common_functions.h"
119
+ #include "crt/device_functions.h"
120
+ #include "device_launch_parameters.h"
121
+
122
+ #if defined(__CUDACC_EXTENDED_LAMBDA__)
123
+ #include <functional>
124
+ #include <utility>
125
+ struct __device_builtin__ __nv_lambda_preheader_injection { };
126
+ #endif /* defined(__CUDACC_EXTENDED_LAMBDA__) */
127
+
128
+ #undef EXCLUDE_FROM_RTC
129
+ #endif /* __CUDACC_RTC__ */
130
+
131
+ #endif /* __CUDACC__ */
132
+
133
+ /** \cond impl_private */
134
+ #if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
135
+ #define __CUDA_DEPRECATED
136
+ #elif defined(_MSC_VER)
137
+ #define __CUDA_DEPRECATED __declspec(deprecated)
138
+ #elif defined(__GNUC__)
139
+ #define __CUDA_DEPRECATED __attribute__((deprecated))
140
+ #else
141
+ #define __CUDA_DEPRECATED
142
+ #endif
143
+ /** \endcond impl_private */
144
+
145
+ #if defined(__cplusplus) && !defined(__CUDACC_RTC__)
146
+
147
+ #if __cplusplus >= 201103
148
+ #include <utility>
149
+ #endif
150
+
151
+ /*******************************************************************************
152
+ * *
153
+ * *
154
+ * *
155
+ *******************************************************************************/
156
+
157
+ /**
158
+ * \addtogroup CUDART_HIGHLEVEL
159
+ * @{
160
+ */
161
+
162
+ /**
163
+ * \brief Launches a device function
164
+ *
165
+ * The function invokes kernel \p func on \p gridDim (\p gridDim.x &times; \p gridDim.y
166
+ * &times; \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x &times;
167
+ * \p blockDim.y &times; \p blockDim.z) threads.
168
+ *
169
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
170
+ * Each pointer, from <tt>args[0]</tt> to <tt>args[N - 1]</tt>, points to the region
171
+ * of memory from which the actual parameter will be copied.
172
+ *
173
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
174
+ * each thread block.
175
+ *
176
+ * \p stream specifies a stream the invocation is associated to.
177
+ *
178
+ * \param func - Device function symbol
179
+ * \param gridDim - Grid dimensions
180
+ * \param blockDim - Block dimensions
181
+ * \param args - Arguments
182
+ * \param sharedMem - Shared memory (defaults to 0)
183
+ * \param stream - Stream identifier (defaults to NULL)
184
+ *
185
+ * \return
186
+ * ::cudaSuccess,
187
+ * ::cudaErrorInvalidDeviceFunction,
188
+ * ::cudaErrorInvalidConfiguration,
189
+ * ::cudaErrorLaunchFailure,
190
+ * ::cudaErrorLaunchTimeout,
191
+ * ::cudaErrorLaunchOutOfResources,
192
+ * ::cudaErrorSharedObjectInitFailed,
193
+ * ::cudaErrorInvalidPtx,
194
+ * ::cudaErrorUnsupportedPtxVersion,
195
+ * ::cudaErrorNoKernelImageForDevice,
196
+ * ::cudaErrorJitCompilerNotFound,
197
+ * ::cudaErrorJitCompilationDisabled
198
+ * \notefnerr
199
+ * \note_async
200
+ * \note_null_stream
201
+ * \note_init_rt
202
+ * \note_callback
203
+ *
204
+ * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)"
205
+ */
206
+ template<class T>
207
+ static __inline__ __host__ cudaError_t cudaLaunchKernel(
208
+ const T *func,
209
+ dim3 gridDim,
210
+ dim3 blockDim,
211
+ void **args,
212
+ size_t sharedMem = 0,
213
+ cudaStream_t stream = 0
214
+ )
215
+ {
216
+ return ::cudaLaunchKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream);
217
+ }
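A brief usage sketch of the C++ overload above; the axpy kernel and its launch geometry are made up for illustration.

__global__ void axpy(float a, const float* x, float* y, int n);

cudaError_t launch_axpy(float a, const float* x, float* y, int n, cudaStream_t stream)
{
    void* args[] = { &a, &x, &y, &n };            // one pointer per kernel parameter, in declaration order
    dim3 grid((n + 255) / 256), block(256);
    return cudaLaunchKernel(axpy, grid, block, args, 0, stream);
}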
218
+
219
+
220
+ #if __cplusplus >= 201103 || defined(__DOXYGEN_ONLY__)
221
+ /**
222
+ * \brief Launches a CUDA function with launch-time configuration
223
+ *
224
+ * Invokes the kernel \p func on \p config->gridDim (\p config->gridDim.x
225
+ * &times; \p config->gridDim.y &times; \p config->gridDim.z) grid of blocks.
226
+ * Each block contains \p config->blockDim (\p config->blockDim.x &times;
227
+ * \p config->blockDim.y &times; \p config->blockDim.z) threads.
228
+ *
229
+ * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that
230
+ * will be available to each thread block.
231
+ *
232
+ * \p config->stream specifies a stream the invocation is associated to.
233
+ *
234
+ * Configuration beyond grid and block dimensions, dynamic shared memory size,
235
+ * and stream can be provided with the following two fields of \p config:
236
+ *
237
+ * \p config->attrs is an array of \p config->numAttrs contiguous
238
+ * ::cudaLaunchAttribute elements. The value of this pointer is not considered
239
+ * if \p config->numAttrs is zero. However, in that case, it is recommended to
240
+ * set the pointer to NULL.
241
+ * \p config->numAttrs is the number of attributes populating the first
242
+ * \p config->numAttrs positions of the \p config->attrs array.
243
+ *
244
+ * The kernel arguments should be passed as arguments to this function via the
245
+ * \p args parameter pack.
246
+ *
247
+ * The C API version of this function, \p cudaLaunchKernelExC, is also available
248
+ * for pre-C++11 compilers and for use cases where the ability to pass kernel
249
+ * parameters via void* array is preferable.
250
+ *
251
+ * \param config - Launch configuration
252
+ * \param func - Kernel to launch
253
+ * \param args - Parameter pack of kernel parameters
254
+ *
255
+ * \return
256
+ * ::cudaSuccess,
257
+ * ::cudaErrorInvalidDeviceFunction,
258
+ * ::cudaErrorInvalidConfiguration,
259
+ * ::cudaErrorLaunchFailure,
260
+ * ::cudaErrorLaunchTimeout,
261
+ * ::cudaErrorLaunchOutOfResources,
262
+ * ::cudaErrorSharedObjectInitFailed,
263
+ * ::cudaErrorInvalidPtx,
264
+ * ::cudaErrorUnsupportedPtxVersion,
265
+ * ::cudaErrorNoKernelImageForDevice,
266
+ * ::cudaErrorJitCompilerNotFound,
267
+ * ::cudaErrorJitCompilationDisabled
268
+ * \note_null_stream
269
+ * \notefnerr
270
+ * \note_init_rt
271
+ * \note_callback
272
+ *
273
+ * \sa
274
+ * \ref ::cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args) "cudaLaunchKernelEx (C API)",
275
+ * ::cuLaunchKernelEx
276
+ */
277
+ template<typename... ExpTypes, typename... ActTypes>
278
+ static __inline__ __host__ cudaError_t cudaLaunchKernelEx(
279
+ const cudaLaunchConfig_t *config,
280
+ void (*kernel)(ExpTypes...),
281
+ ActTypes &&... args
282
+ )
283
+ {
284
+ return [&](ExpTypes... coercedArgs){
285
+ void *pArgs[] = { &coercedArgs... };
286
+ return ::cudaLaunchKernelExC(config, (const void *)kernel, pArgs);
287
+ }(std::forward<ActTypes>(args)...);
288
+ }
289
+ #endif
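A short sketch of the variadic overload above (C++11 or later); the scale kernel and the configuration values are illustrative.

__global__ void scale(float* data, float factor);

cudaError_t launch_scale(float* d_data, cudaStream_t stream)
{
    cudaLaunchConfig_t cfg = {};
    cfg.gridDim          = dim3(64);
    cfg.blockDim         = dim3(256);
    cfg.dynamicSmemBytes = 0;
    cfg.stream           = stream;                 // attrs/numAttrs stay zero: no extra launch attributes
    return cudaLaunchKernelEx(&cfg, scale, d_data, 2.0f);   // kernel arguments are passed directly
}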
290
+
291
+ /**
292
+ * \brief Launches a device function
293
+ *
294
+ * The function invokes kernel \p func on \p gridDim (\p gridDim.x &times; \p gridDim.y
295
+ * &times; \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x &times;
296
+ * \p blockDim.y &times; \p blockDim.z) threads.
297
+ *
298
+ * The device on which this kernel is invoked must have a non-zero value for
299
+ * the device attribute ::cudaDevAttrCooperativeLaunch.
300
+ *
301
+ * The total number of blocks launched cannot exceed the maximum number of blocks per
302
+ * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or
303
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
304
+ * as specified by the device attribute ::cudaDevAttrMultiProcessorCount.
305
+ *
306
+ * The kernel cannot make use of CUDA dynamic parallelism.
307
+ *
308
+ * If the kernel has N parameters, \p args should point to an array of N pointers.
309
+ * Each pointer, from <tt>args[0]</tt> to <tt>args[N - 1]</tt>, points to the region
310
+ * of memory from which the actual parameter will be copied.
311
+ *
312
+ * \p sharedMem sets the amount of dynamic shared memory that will be available to
313
+ * each thread block.
314
+ *
315
+ * \p stream specifies a stream the invocation is associated to.
316
+ *
317
+ * \param func - Device function symbol
318
+ * \param gridDim - Grid dimensions
319
+ * \param blockDim - Block dimensions
320
+ * \param args - Arguments
321
+ * \param sharedMem - Shared memory (defaults to 0)
322
+ * \param stream - Stream identifier (defaults to NULL)
323
+ *
324
+ * \return
325
+ * ::cudaSuccess,
326
+ * ::cudaErrorInvalidDeviceFunction,
327
+ * ::cudaErrorInvalidConfiguration,
328
+ * ::cudaErrorLaunchFailure,
329
+ * ::cudaErrorLaunchTimeout,
330
+ * ::cudaErrorLaunchOutOfResources,
331
+ * ::cudaErrorSharedObjectInitFailed
332
+ * \notefnerr
333
+ * \note_async
334
+ * \note_null_stream
335
+ * \note_init_rt
336
+ * \note_callback
337
+ *
338
+ * \ref ::cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C API)"
339
+ */
340
+ template<class T>
341
+ static __inline__ __host__ cudaError_t cudaLaunchCooperativeKernel(
342
+ const T *func,
343
+ dim3 gridDim,
344
+ dim3 blockDim,
345
+ void **args,
346
+ size_t sharedMem = 0,
347
+ cudaStream_t stream = 0
348
+ )
349
+ {
350
+ return ::cudaLaunchCooperativeKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream);
351
+ }
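A minimal cooperative-launch sketch, assuming a device with a non-zero value for cudaDevAttrCooperativeLaunch and compilation with relocatable device code; the kernel and grid size are illustrative.

#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void grid_barrier_demo(int* counter)
{
    atomicAdd(counter, 1);
    cg::this_grid().sync();                       // whole-grid barrier; needs a cooperative launch
}

cudaError_t launch_grid_barrier(int* d_counter)
{
    void* args[] = { &d_counter };
    return cudaLaunchCooperativeKernel(grid_barrier_demo, dim3(8), dim3(128), args);
}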
352
+
353
+ /**
354
+ * \brief \hl Creates an event object with the specified flags
355
+ *
356
+ * Creates an event object with the specified flags. Valid flags include:
357
+ * - ::cudaEventDefault: Default event creation flag.
358
+ * - ::cudaEventBlockingSync: Specifies that event should use blocking
359
+ * synchronization. A host thread that uses ::cudaEventSynchronize() to wait
360
+ * on an event created with this flag will block until the event actually
361
+ * completes.
362
+ * - ::cudaEventDisableTiming: Specifies that the created event does not need
363
+ * to record timing data. Events created with this flag specified and
364
+ * the ::cudaEventBlockingSync flag not specified will provide the best
365
+ * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery().
366
+ *
367
+ * \param event - Newly created event
368
+ * \param flags - Flags for new event
369
+ *
370
+ * \return
371
+ * ::cudaSuccess,
372
+ * ::cudaErrorInvalidValue,
373
+ * ::cudaErrorLaunchFailure,
374
+ * ::cudaErrorMemoryAllocation
375
+ * \notefnerr
376
+ * \note_init_rt
377
+ * \note_callback
378
+ *
379
+ * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)",
380
+ * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery,
381
+ * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime,
382
+ * ::cudaStreamWaitEvent
383
+ */
384
+ static __inline__ __host__ cudaError_t cudaEventCreate(
385
+ cudaEvent_t *event,
386
+ unsigned int flags
387
+ )
388
+ {
389
+ return ::cudaEventCreateWithFlags(event, flags);
390
+ }
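A small timing sketch built on the flag-taking overload above; the stream and the work enqueued on it are assumed to exist elsewhere.

float time_section(cudaStream_t stream)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start, cudaEventDefault);
    cudaEventCreate(&stop,  cudaEventBlockingSync);   // host blocks inside cudaEventSynchronize
    cudaEventRecord(start, stream);
    /* ... enqueue the work to be timed on `stream` ... */
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}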
391
+
392
+ /**
393
+ * \brief Creates an executable graph from a graph
394
+ *
395
+ * Instantiates \p graph as an executable graph. The graph is validated for any
396
+ * structural constraints or intra-node constraints which were not previously
397
+ * validated. If instantiation is successful, a handle to the instantiated graph
398
+ * is returned in \p pGraphExec.
399
+ *
400
+ * If there are any errors, diagnostic information may be returned in \p pErrorNode and
401
+ * \p pLogBuffer. This is the primary way to inspect instantiation errors. The output
402
+ * will be null terminated unless the diagnostics overflow
403
+ * the buffer. In this case, they will be truncated, and the last byte can be
404
+ * inspected to determine if truncation occurred.
405
+ *
406
+ * \param pGraphExec - Returns instantiated graph
407
+ * \param graph - Graph to instantiate
408
+ * \param pErrorNode - In case of an instantiation error, this may be modified to
409
+ * indicate a node contributing to the error
410
+ * \param pLogBuffer - A character buffer to store diagnostic messages
411
+ * \param bufferSize - Size of the log buffer in bytes
412
+ *
413
+ * \return
414
+ * ::cudaSuccess,
415
+ * ::cudaErrorInvalidValue
416
+ * \note_graph_thread_safety
417
+ * \notefnerr
418
+ * \note_init_rt
419
+ * \note_callback
420
+ *
421
+ * \sa
422
+ * ::cudaGraphInstantiateWithFlags,
423
+ * ::cudaGraphCreate,
424
+ * ::cudaGraphUpload,
425
+ * ::cudaGraphLaunch,
426
+ * ::cudaGraphExecDestroy
427
+ */
428
+ static __inline__ __host__ cudaError_t cudaGraphInstantiate(
429
+ cudaGraphExec_t *pGraphExec,
430
+ cudaGraph_t graph,
431
+ cudaGraphNode_t *pErrorNode,
432
+ char *pLogBuffer,
433
+ size_t bufferSize
434
+ )
435
+ {
436
+ (void)pErrorNode;
437
+ (void)pLogBuffer;
438
+ (void)bufferSize;
439
+ return ::cudaGraphInstantiate(pGraphExec, graph, 0);
440
+ }
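A sketch of building a graph by stream capture and instantiating it through the wrapper above, which ignores the error-reporting parameters and forwards to the flags-taking C API; the captured work is left as a placeholder.

void capture_and_replay(cudaStream_t stream)
{
    cudaGraph_t graph;
    cudaGraphExec_t exec;
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    /* ... launch kernels / async copies into `stream` ... */
    cudaStreamEndCapture(stream, &graph);
    cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0);   // resolves to the overload above
    cudaGraphLaunch(exec, stream);
    cudaStreamSynchronize(stream);
    cudaGraphExecDestroy(exec);
    cudaGraphDestroy(graph);
}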
441
+
442
+ /**
443
+ * \brief \hl Allocates page-locked memory on the host
444
+ *
445
+ * Allocates \p size bytes of host memory that is page-locked and accessible
446
+ * to the device. The driver tracks the virtual memory ranges allocated with
447
+ * this function and automatically accelerates calls to functions such as
448
+ * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it
449
+ * can be read or written with much higher bandwidth than pageable memory
450
+ * obtained with functions such as ::malloc(). Allocating excessive amounts of
451
+ * pinned memory may degrade system performance, since it reduces the amount
452
+ * of memory available to the system for paging. As a result, this function is
453
+ * best used sparingly to allocate staging areas for data exchange between host
454
+ * and device.
455
+ *
456
+ * The \p flags parameter enables different options to be specified that affect
457
+ * the allocation, as follows.
458
+ * - ::cudaHostAllocDefault: This flag's value is defined to be 0.
459
+ * - ::cudaHostAllocPortable: The memory returned by this call will be
460
+ * considered as pinned memory by all CUDA contexts, not just the one that
461
+ * performed the allocation.
462
+ * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space.
463
+ * The device pointer to the memory may be obtained by calling
464
+ * ::cudaHostGetDevicePointer().
465
+ * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC).
466
+ * WC memory can be transferred across the PCI Express bus more quickly on some
467
+ * system configurations, but cannot be read efficiently by most CPUs. WC
468
+ * memory is a good option for buffers that will be written by the CPU and read
469
+ * by the device via mapped pinned memory or host->device transfers.
470
+ *
471
+ * All of these flags are orthogonal to one another: a developer may allocate
472
+ * memory that is portable, mapped and/or write-combined with no restrictions.
473
+ *
474
+ * ::cudaSetDeviceFlags() must have been called with the ::cudaDeviceMapHost
475
+ * flag in order for the ::cudaHostAllocMapped flag to have any effect.
476
+ *
477
+ * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices
478
+ * that do not support mapped pinned memory. The failure is deferred to
479
+ * ::cudaHostGetDevicePointer() because the memory may be mapped into other
480
+ * CUDA contexts via the ::cudaHostAllocPortable flag.
481
+ *
482
+ * Memory allocated by this function must be freed with ::cudaFreeHost().
483
+ *
484
+ * \param ptr - Device pointer to allocated memory
485
+ * \param size - Requested allocation size in bytes
486
+ * \param flags - Requested properties of allocated memory
487
+ *
488
+ * \return
489
+ * ::cudaSuccess,
490
+ * ::cudaErrorMemoryAllocation
491
+ * \notefnerr
492
+ * \note_init_rt
493
+ * \note_callback
494
+ *
495
+ * \sa ::cudaSetDeviceFlags,
496
+ * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)",
497
+ * ::cudaFreeHost, ::cudaHostAlloc
498
+ */
499
+ static __inline__ __host__ cudaError_t cudaMallocHost(
500
+ void **ptr,
501
+ size_t size,
502
+ unsigned int flags
503
+ )
504
+ {
505
+ return ::cudaHostAlloc(ptr, size, flags);
506
+ }
507
+
508
+ template<class T>
509
+ static __inline__ __host__ cudaError_t cudaHostAlloc(
510
+ T **ptr,
511
+ size_t size,
512
+ unsigned int flags
513
+ )
514
+ {
515
+ return ::cudaHostAlloc((void**)(void*)ptr, size, flags);
516
+ }
517
+
518
+ template<class T>
519
+ static __inline__ __host__ cudaError_t cudaHostGetDevicePointer(
520
+ T **pDevice,
521
+ void *pHost,
522
+ unsigned int flags
523
+ )
524
+ {
525
+ return ::cudaHostGetDevicePointer((void**)(void*)pDevice, pHost, flags);
526
+ }
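A sketch of the typed overloads above: allocate a pinned, mapped host buffer and query its device-visible alias. The buffer size is illustrative; as the cudaMallocHost documentation above notes, platforms without unified addressing need cudaSetDeviceFlags(cudaDeviceMapHost) before mapped allocations take effect.

void pinned_buffer_example()
{
    const size_t n = 1 << 20;
    float* h_buf = nullptr;
    cudaHostAlloc(&h_buf, n * sizeof(float), cudaHostAllocMapped);   // typed template overload
    float* d_alias = nullptr;
    cudaHostGetDevicePointer(&d_alias, h_buf, 0);                    // device pointer to the same memory
    /* ... pass d_alias to kernels, read/write h_buf on the host ... */
    cudaFreeHost(h_buf);
}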
527
+
528
+ /**
529
+ * \brief Allocates memory that will be automatically managed by the Unified Memory system
530
+ *
531
+ * Allocates \p size bytes of managed memory on the device and returns in
532
+ * \p *devPtr a pointer to the allocated memory. If the device doesn't support
533
+ * allocating managed memory, ::cudaErrorNotSupported is returned. Support
534
+ * for managed memory can be queried using the device attribute
535
+ * ::cudaDevAttrManagedMemory. The allocated memory is suitably
536
+ * aligned for any kind of variable. The memory is not cleared. If \p size
537
+ * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. The pointer
538
+ * is valid on the CPU and on all GPUs in the system that support managed memory.
539
+ * All accesses to this pointer must obey the Unified Memory programming model.
540
+ *
541
+ * \p flags specifies the default stream association for this allocation.
542
+ * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The
543
+ * default value for \p flags is ::cudaMemAttachGlobal.
544
+ * If ::cudaMemAttachGlobal is specified, then this memory is accessible from
545
+ * any stream on any device. If ::cudaMemAttachHost is specified, then the
546
+ * allocation should not be accessed from devices that have a zero value for the
547
+ * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to
548
+ * ::cudaStreamAttachMemAsync will be required to enable access on such devices.
549
+ *
550
+ * If the association is later changed via ::cudaStreamAttachMemAsync to
551
+ * a single stream, the default association, as specified during ::cudaMallocManaged,
552
+ * is restored when that stream is destroyed. For __managed__ variables, the
553
+ * default association is always ::cudaMemAttachGlobal. Note that destroying a
554
+ * stream is an asynchronous operation, and as a result, the change to default
555
+ * association won't happen until all work in the stream has completed.
556
+ *
557
+ * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree.
558
+ *
559
+ * Device memory oversubscription is possible for GPUs that have a non-zero value for the
560
+ * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on
561
+ * such GPUs may be evicted from device memory to host memory at any time by the Unified
562
+ * Memory driver in order to make room for other allocations.
563
+ *
564
+ * In a multi-GPU system where all GPUs have a non-zero value for the device attribute
565
+ * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this
566
+ * API returns and instead may be populated on access. In such systems, managed memory can
567
+ * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to
568
+ * maintain data locality and prevent excessive page faults to the extent possible. The application
569
+ * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application
570
+ * can also explicitly migrate memory to a desired processor's memory via
571
+ * ::cudaMemPrefetchAsync.
572
+ *
573
+ * In a multi-GPU system where all of the GPUs have a zero value for the device attribute
574
+ * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support
575
+ * with each other, the physical storage for managed memory is created on the GPU which is active
576
+ * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced
577
+ * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate
578
+ * memory among such GPUs.
579
+ *
580
+ * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and
581
+ * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess
582
+ * is zero for at least one of those GPUs, the location chosen for physical storage of managed
583
+ * memory is system-dependent.
584
+ * - On Linux, the location chosen will be device memory as long as the current set of active
585
+ * contexts are on devices that either have peer-to-peer support with each other or have a
586
+ * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
587
+ * If there is an active context on a GPU that does not have a non-zero value for that device
588
+ * attribute and it does not have peer-to-peer support with the other devices that have active
589
+ * contexts on them, then the location for physical storage will be 'zero-copy' or host memory.
590
+ * Note that this means that managed memory that is located in device memory is migrated to
591
+ * host memory if a new context is created on a GPU that doesn't have a non-zero value for
592
+ * the device attribute and does not support peer-to-peer with at least one of the other devices
593
+ * that has an active context. This in turn implies that context creation may fail if there is
594
+ * insufficient host memory to migrate all managed allocations.
595
+ * - On Windows, the physical storage is always created in 'zero-copy' or host memory.
596
+ * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these
597
+ * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to
598
+ * restrict CUDA to only use those GPUs that have peer-to-peer support.
599
+ * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero
600
+ * value to force the driver to always use device memory for physical storage.
601
+ * When this environment variable is set to a non-zero value, all devices used in
602
+ * that process that support managed memory have to be peer-to-peer compatible
603
+ * with each other. The error ::cudaErrorInvalidDevice will be returned if a device
604
+ * that supports managed memory is used and it is not peer-to-peer compatible with
605
+ * any of the other managed memory supporting devices that were previously used in
606
+ * that process, even if ::cudaDeviceReset has been called on those devices. These
607
+ * environment variables are described in the CUDA programming guide under the
608
+ * "CUDA environment variables" section.
609
+ * - On ARM, managed memory is not available on a discrete GPU with Drive PX-2.
610
+ *
611
+ * \param devPtr - Pointer to allocated device memory
612
+ * \param size - Requested allocation size in bytes
613
+ * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal)
614
+ *
615
+ * \return
616
+ * ::cudaSuccess,
617
+ * ::cudaErrorMemoryAllocation,
618
+ * ::cudaErrorNotSupported,
619
+ * ::cudaErrorInvalidValue
620
+ * \note_init_rt
621
+ * \note_callback
622
+ *
623
+ * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray,
624
+ * ::cudaMalloc3D, ::cudaMalloc3DArray,
625
+ * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)",
626
+ * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync
627
+ */
628
+ template<class T>
629
+ static __inline__ __host__ cudaError_t cudaMallocManaged(
630
+ T **devPtr,
631
+ size_t size,
632
+ unsigned int flags = cudaMemAttachGlobal
633
+ )
634
+ {
635
+ return ::cudaMallocManaged((void**)(void*)devPtr, size, flags);
636
+ }
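A brief managed-memory sketch using the typed overload above; the increment kernel and the sizes are illustrative.

__global__ void increment(int* data, size_t n);

void managed_example()
{
    const size_t n = 1024;
    int* data = nullptr;
    cudaMallocManaged(&data, n * sizeof(int));     // flags default to cudaMemAttachGlobal
    for (size_t i = 0; i < n; ++i) data[i] = 0;    // the host writes through the same pointer
    increment<<<4, 256>>>(data, n);
    cudaDeviceSynchronize();                       // finish device work before the host touches it again
    cudaFree(data);
}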
637
+
638
+ /**
639
+ * \brief Attach memory to a stream asynchronously
640
+ *
641
+ * Enqueues an operation in \p stream to specify stream association of
642
+ * \p length bytes of memory starting from \p devPtr. This function is a
643
+ * stream-ordered operation, meaning that it is dependent on, and will
644
+ * only take effect when, previous work in stream has completed. Any
645
+ * previous association is automatically replaced.
646
+ *
647
+ * \p devPtr must point to one of the following types of memory:
648
+ * - managed memory declared using the __managed__ keyword or allocated with
649
+ * ::cudaMallocManaged.
650
+ * - a valid host-accessible region of system-allocated pageable memory. This
651
+ * type of memory may only be specified if the device associated with the
652
+ * stream reports a non-zero value for the device attribute
653
+ * ::cudaDevAttrPageableMemoryAccess.
654
+ *
655
+ * For managed allocations, \p length must be either zero or the entire
656
+ * allocation's size. Both indicate that the entire allocation's stream
657
+ * association is being changed. Currently, it is not possible to change stream
658
+ * association for a portion of a managed allocation.
659
+ *
660
+ * For pageable allocations, \p length must be non-zero.
661
+ *
662
+ * The stream association is specified using \p flags which must be
663
+ * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle.
664
+ * The default value for \p flags is ::cudaMemAttachSingle.
665
+ * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed
666
+ * by any stream on any device.
667
+ * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee
668
+ * that it won't access the memory on the device from any stream on a device that
669
+ * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess.
670
+ * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with
671
+ * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess,
672
+ * the program makes a guarantee that it will only access the memory on the device
673
+ * from \p stream. It is illegal to attach singly to the NULL stream, because the
674
+ * NULL stream is a virtual global stream and not a specific stream. An error will
675
+ * be returned in this case.
676
+ *
677
+ * When memory is associated with a single stream, the Unified Memory system will
678
+ * allow CPU access to this memory region so long as all operations in \p stream
679
+ * have completed, regardless of whether other streams are active. In effect,
680
+ * this constrains exclusive ownership of the managed memory region by
681
+ * an active GPU to per-stream activity instead of whole-GPU activity.
682
+ *
683
+ * Accessing memory on the device from streams that are not associated with
684
+ * it will produce undefined results. No error checking is performed by the
685
+ * Unified Memory system to ensure that kernels launched into other streams
686
+ * do not access this region.
687
+ *
688
+ * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync
689
+ * via events, synchronization or other means to ensure legal access to memory
690
+ * at all times. Data visibility and coherency will be changed appropriately
691
+ * for all kernels which follow a stream-association change.
692
+ *
693
+ * If \p stream is destroyed while data is associated with it, the association is
694
+ * removed and the association reverts to the default visibility of the allocation
695
+ * as specified at ::cudaMallocManaged. For __managed__ variables, the default
696
+ * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an
697
+ * asynchronous operation, and as a result, the change to default association won't
698
+ * happen until all work in the stream has completed.
699
+ *
700
+ * \param stream - Stream in which to enqueue the attach operation
701
+ * \param devPtr - Pointer to memory (must be a pointer to managed memory or
702
+ * to a valid host-accessible region of system-allocated
703
+ * memory)
704
+ * \param length - Length of memory (defaults to zero)
705
+ * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle)
706
+ *
707
+ * \return
708
+ * ::cudaSuccess,
709
+ * ::cudaErrorNotReady,
710
+ * ::cudaErrorInvalidValue,
711
+ * ::cudaErrorInvalidResourceHandle
712
+ * \notefnerr
713
+ * \note_init_rt
714
+ * \note_callback
715
+ *
716
+ * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged
717
+ */
718
+ template<class T>
719
+ static __inline__ __host__ cudaError_t cudaStreamAttachMemAsync(
720
+ cudaStream_t stream,
721
+ T *devPtr,
722
+ size_t length = 0,
723
+ unsigned int flags = cudaMemAttachSingle
724
+ )
725
+ {
726
+ return ::cudaStreamAttachMemAsync(stream, (void*)devPtr, length, flags);
727
+ }
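A short sketch of the template above, attaching an entire managed allocation to one stream under the default cudaMemAttachSingle association.

void attach_example(cudaStream_t stream)
{
    float* buf = nullptr;
    cudaMallocManaged(&buf, 4096 * sizeof(float));
    cudaStreamAttachMemAsync(stream, buf);        // length 0 = whole allocation, flags default to single
    cudaStreamSynchronize(stream);                // the attach takes effect once prior stream work drains
    /* ... kernels in `stream` may use buf; the CPU may touch it whenever `stream` is idle ... */
    cudaFree(buf);
}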
728
+
729
+ template<class T>
730
+ static __inline__ __host__ cudaError_t cudaMalloc(
731
+ T **devPtr,
732
+ size_t size
733
+ )
734
+ {
735
+ return ::cudaMalloc((void**)(void*)devPtr, size);
736
+ }
737
+
738
+ template<class T>
739
+ static __inline__ __host__ cudaError_t cudaMallocHost(
740
+ T **ptr,
741
+ size_t size,
742
+ unsigned int flags = 0
743
+ )
744
+ {
745
+ return cudaMallocHost((void**)(void*)ptr, size, flags);
746
+ }
747
+
748
+ template<class T>
749
+ static __inline__ __host__ cudaError_t cudaMallocPitch(
750
+ T **devPtr,
751
+ size_t *pitch,
752
+ size_t width,
753
+ size_t height
754
+ )
755
+ {
756
+ return ::cudaMallocPitch((void**)(void*)devPtr, pitch, width, height);
757
+ }
758
+
759
+ /**
760
+ * \brief Allocate from a pool
761
+ *
762
+ * This is an alternate spelling for cudaMallocFromPoolAsync
763
+ * made available through operator overloading.
764
+ *
765
+ * \sa ::cudaMallocFromPoolAsync,
766
+ * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaStream_t hStream) "cudaMallocAsync (C API)"
767
+ */
768
+ static __inline__ __host__ cudaError_t cudaMallocAsync(
769
+ void **ptr,
770
+ size_t size,
771
+ cudaMemPool_t memPool,
772
+ cudaStream_t stream
773
+ )
774
+ {
775
+ return ::cudaMallocFromPoolAsync(ptr, size, memPool, stream);
776
+ }
777
+
778
+ template<class T>
779
+ static __inline__ __host__ cudaError_t cudaMallocAsync(
780
+ T **ptr,
781
+ size_t size,
782
+ cudaMemPool_t memPool,
783
+ cudaStream_t stream
784
+ )
785
+ {
786
+ return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream);
787
+ }
788
+
789
+ template<class T>
790
+ static __inline__ __host__ cudaError_t cudaMallocAsync(
791
+ T **ptr,
792
+ size_t size,
793
+ cudaStream_t stream
794
+ )
795
+ {
796
+ return ::cudaMallocAsync((void**)(void*)ptr, size, stream);
797
+ }
798
+
799
+ template<class T>
800
+ static __inline__ __host__ cudaError_t cudaMallocFromPoolAsync(
801
+ T **ptr,
802
+ size_t size,
803
+ cudaMemPool_t memPool,
804
+ cudaStream_t stream
805
+ )
806
+ {
807
+ return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream);
808
+ }
809
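+ /* Usage sketch (illustrative only): stream-ordered allocation through the typed
+  * overloads above, first from the device's default memory pool and then from an
+  * explicitly supplied pool.
+  */
+ #if 0 /* example only -- not compiled */
+ static void streamOrderedAllocExample(cudaStream_t stream, cudaMemPool_t pool)
+ {
+     int *d_a = NULL;
+     int *d_b = NULL;
+     cudaMallocAsync(&d_a, 1024 * sizeof(int), stream);       /* default pool of the current device */
+     cudaMallocAsync(&d_b, 1024 * sizeof(int), pool, stream); /* forwards to cudaMallocFromPoolAsync */
+     cudaFreeAsync(d_a, stream);
+     cudaFreeAsync(d_b, stream);
+ }
+ #endif
+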
+
810
+ #if defined(__CUDACC__)
811
+
812
+ /**
813
+ * \brief \hl Copies data to the given symbol on the device
814
+ *
815
+ * Copies \p count bytes from the memory area pointed to by \p src
816
+ * to the memory area \p offset bytes from the start of symbol
817
+ * \p symbol. The memory areas may not overlap. \p symbol is a variable that
818
+ * resides in global or constant memory space. \p kind can be either
819
+ * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice.
820
+ *
821
+ * \param symbol - Device symbol reference
822
+ * \param src - Source memory address
823
+ * \param count - Size in bytes to copy
824
+ * \param offset - Offset from start of symbol in bytes
825
+ * \param kind - Type of transfer
826
+ *
827
+ * \return
828
+ * ::cudaSuccess,
829
+ * ::cudaErrorInvalidValue,
830
+ * ::cudaErrorInvalidSymbol,
831
+ * ::cudaErrorInvalidMemcpyDirection,
832
+ * ::cudaErrorNoKernelImageForDevice
833
+ * \notefnerr
834
+ * \note_sync
835
+ * \note_string_api_deprecation
836
+ * \note_init_rt
837
+ * \note_callback
838
+ *
839
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
840
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
841
+ * ::cudaMemcpy2DArrayToArray,
842
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
843
+ * ::cudaMemcpy2DToArrayAsync,
844
+ * ::cudaMemcpy2DFromArrayAsync,
845
+ * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync
846
+ */
847
+ template<class T>
848
+ static __inline__ __host__ cudaError_t cudaMemcpyToSymbol(
849
+ const T &symbol,
850
+ const void *src,
851
+ size_t count,
852
+ size_t offset = 0,
853
+ enum cudaMemcpyKind kind = cudaMemcpyHostToDevice
854
+ )
855
+ {
856
+ return ::cudaMemcpyToSymbol((const void*)&symbol, src, count, offset, kind);
857
+ }
858
+
859
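+ /* Usage sketch (illustrative only): `c_coeffs` is a hypothetical __constant__ symbol;
+  * the overload above takes the symbol by reference rather than by address or name.
+  */
+ #if 0 /* example only -- not compiled */
+ __constant__ float c_coeffs[16];
+
+ static void uploadCoeffs(const float *hostCoeffs)
+ {
+     /* Synchronous copy of 16 floats from host memory into the constant symbol. */
+     cudaMemcpyToSymbol(c_coeffs, hostCoeffs, 16 * sizeof(float));
+ }
+ #endif
+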
+ /**
860
+ * \brief \hl Copies data to the given symbol on the device
861
+ *
862
+ * Copies \p count bytes from the memory area pointed to by \p src
863
+ * to the memory area \p offset bytes from the start of symbol
864
+ * \p symbol. The memory areas may not overlap. \p symbol is a variable that
865
+ * resides in global or constant memory space. \p kind can be either
866
+ * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice.
867
+ *
868
+ * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so
869
+ * the call may return before the copy is complete. The copy can optionally
870
+ * be associated to a stream by passing a non-zero \p stream argument. If
871
+ * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy
872
+ * may overlap with operations in other streams.
873
+ *
874
+ * \param symbol - Device symbol reference
875
+ * \param src - Source memory address
876
+ * \param count - Size in bytes to copy
877
+ * \param offset - Offset from start of symbol in bytes
878
+ * \param kind - Type of transfer
879
+ * \param stream - Stream identifier
880
+ *
881
+ * \return
882
+ * ::cudaSuccess,
883
+ * ::cudaErrorInvalidValue,
884
+ * ::cudaErrorInvalidSymbol,
885
+ * ::cudaErrorInvalidMemcpyDirection,
886
+ * ::cudaErrorNoKernelImageForDevice
887
+ * \notefnerr
888
+ * \note_async
889
+ * \note_string_api_deprecation
890
+ * \note_init_rt
891
+ * \note_callback
892
+ *
893
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
894
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
895
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
896
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
897
+ * ::cudaMemcpy2DToArrayAsync,
898
+ * ::cudaMemcpy2DFromArrayAsync,
899
+ * ::cudaMemcpyFromSymbolAsync
900
+ */
901
+ template<class T>
902
+ static __inline__ __host__ cudaError_t cudaMemcpyToSymbolAsync(
903
+ const T &symbol,
904
+ const void *src,
905
+ size_t count,
906
+ size_t offset = 0,
907
+ enum cudaMemcpyKind kind = cudaMemcpyHostToDevice,
908
+ cudaStream_t stream = 0
909
+ )
910
+ {
911
+ return ::cudaMemcpyToSymbolAsync((const void*)&symbol, src, count, offset, kind, stream);
912
+ }
913
+
914
+ /**
915
+ * \brief \hl Copies data from the given symbol on the device
916
+ *
917
+ * Copies \p count bytes from the memory area \p offset bytes
918
+ * from the start of symbol \p symbol to the memory area pointed to by \p dst.
919
+ * The memory areas may not overlap. \p symbol is a variable that
920
+ * resides in global or constant memory space. \p kind can be either
921
+ * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice.
922
+ *
923
+ * \param dst - Destination memory address
924
+ * \param symbol - Device symbol reference
925
+ * \param count - Size in bytes to copy
926
+ * \param offset - Offset from start of symbol in bytes
927
+ * \param kind - Type of transfer
928
+ *
929
+ * \return
930
+ * ::cudaSuccess,
931
+ * ::cudaErrorInvalidValue,
932
+ * ::cudaErrorInvalidSymbol,
933
+ * ::cudaErrorInvalidMemcpyDirection,
934
+ * ::cudaErrorNoKernelImageForDevice
935
+ * \notefnerr
936
+ * \note_sync
937
+ * \note_string_api_deprecation
938
+ * \note_init_rt
939
+ * \note_callback
940
+ *
941
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
942
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
943
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
944
+ * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
945
+ * ::cudaMemcpy2DToArrayAsync,
946
+ * ::cudaMemcpy2DFromArrayAsync,
947
+ * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync
948
+ */
949
+ template<class T>
950
+ static __inline__ __host__ cudaError_t cudaMemcpyFromSymbol(
951
+ void *dst,
952
+ const T &symbol,
953
+ size_t count,
954
+ size_t offset = 0,
955
+ enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost
956
+ )
957
+ {
958
+ return ::cudaMemcpyFromSymbol(dst, (const void*)&symbol, count, offset, kind);
959
+ }
960
+
961
+ /**
962
+ * \brief \hl Copies data from the given symbol on the device
963
+ *
964
+ * Copies \p count bytes from the memory area \p offset bytes
965
+ * from the start of symbol \p symbol to the memory area pointed to by \p dst.
966
+ * The memory areas may not overlap. \p symbol is a variable that resides in
967
+ * global or constant memory space. \p kind can be either
968
+ * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice.
969
+ *
970
+ * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so
971
+ * the call may return before the copy is complete. The copy can optionally be
972
+ * associated to a stream by passing a non-zero \p stream argument. If \p kind
973
+ * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap
974
+ * with operations in other streams.
975
+ *
976
+ * \param dst - Destination memory address
977
+ * \param symbol - Device symbol reference
978
+ * \param count - Size in bytes to copy
979
+ * \param offset - Offset from start of symbol in bytes
980
+ * \param kind - Type of transfer
981
+ * \param stream - Stream identifier
982
+ *
983
+ * \return
984
+ * ::cudaSuccess,
985
+ * ::cudaErrorInvalidValue,
986
+ * ::cudaErrorInvalidSymbol,
987
+ * ::cudaErrorInvalidMemcpyDirection,
988
+ * ::cudaErrorNoKernelImageForDevice
989
+ * \notefnerr
990
+ * \note_async
991
+ * \note_string_api_deprecation
992
+ * \note_init_rt
993
+ * \note_callback
994
+ *
995
+ * \sa ::cudaMemcpy, ::cudaMemcpy2D,
996
+ * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,
997
+ * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,
998
+ * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,
999
+ * ::cudaMemcpy2DToArrayAsync,
1000
+ * ::cudaMemcpy2DFromArrayAsync,
1001
+ * ::cudaMemcpyToSymbolAsync
1002
+ */
1003
+ template<class T>
1004
+ static __inline__ __host__ cudaError_t cudaMemcpyFromSymbolAsync(
1005
+ void *dst,
1006
+ const T &symbol,
1007
+ size_t count,
1008
+ size_t offset = 0,
1009
+ enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost,
1010
+ cudaStream_t stream = 0
1011
+ )
1012
+ {
1013
+ return ::cudaMemcpyFromSymbolAsync(dst, (const void*)&symbol, count, offset, kind, stream);
1014
+ }
1015
+
1016
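+ /* Usage sketch (illustrative only): an asynchronous round trip through a hypothetical
+  * __device__ symbol `g_state`, ordered by a user stream. For the copies to overlap
+  * with other work, `hostIn` and `hostOut` would normally be pinned allocations.
+  */
+ #if 0 /* example only -- not compiled */
+ __device__ int g_state[8];
+
+ static void roundTrip(const int *hostIn, int *hostOut, cudaStream_t stream)
+ {
+     cudaMemcpyToSymbolAsync(g_state, hostIn, 8 * sizeof(int), 0, cudaMemcpyHostToDevice, stream);
+     cudaMemcpyFromSymbolAsync(hostOut, g_state, 8 * sizeof(int), 0, cudaMemcpyDeviceToHost, stream);
+     cudaStreamSynchronize(stream); /* hostOut is valid only after the stream has drained */
+ }
+ #endif
+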
+ /**
1017
+ * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph
1018
+ *
1019
+ * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with
1020
+ * \p numDependencies dependencies specified via \p pDependencies.
1021
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed
1022
+ * at the root of the graph. \p pDependencies may not have any duplicate entries.
1023
+ * A handle to the new node will be returned in \p pGraphNode.
1024
+ *
1025
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1026
+ * pointed to by \p src to the memory area pointed to by \p offset bytes from the start
1027
+ * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that
1028
+ * resides in global or constant memory space. \p kind can be either
1029
+ * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1030
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of
1031
+ * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault
1032
+ * is only allowed on systems that support unified virtual addressing.
1033
+ *
1034
+ * Memcpy nodes have some additional restrictions with regard to managed memory if the
1035
+ * system contains at least one device which has a zero value for the device attribute
1036
+ * ::cudaDevAttrConcurrentManagedAccess.
1037
+ *
1038
+ * \param pGraphNode - Returns newly created node
1039
+ * \param graph - Graph to which to add the node
1040
+ * \param pDependencies - Dependencies of the node
1041
+ * \param numDependencies - Number of dependencies
1042
+ * \param symbol - Device symbol address
1043
+ * \param src - Source memory address
1044
+ * \param count - Size in bytes to copy
1045
+ * \param offset - Offset from start of symbol in bytes
1046
+ * \param kind - Type of transfer
1047
+ *
1048
+ * \return
1049
+ * ::cudaSuccess,
1050
+ * ::cudaErrorInvalidValue
1051
+ * \note_graph_thread_safety
1052
+ * \notefnerr
1053
+ * \note_init_rt
1054
+ * \note_callback
1055
+ *
1056
+ * \sa
1057
+ * ::cudaMemcpyToSymbol,
1058
+ * ::cudaGraphAddMemcpyNode,
1059
+ * ::cudaGraphAddMemcpyNodeFromSymbol,
1060
+ * ::cudaGraphMemcpyNodeGetParams,
1061
+ * ::cudaGraphMemcpyNodeSetParams,
1062
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1063
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1064
+ * ::cudaGraphCreate,
1065
+ * ::cudaGraphDestroyNode,
1066
+ * ::cudaGraphAddChildGraphNode,
1067
+ * ::cudaGraphAddEmptyNode,
1068
+ * ::cudaGraphAddKernelNode,
1069
+ * ::cudaGraphAddHostNode,
1070
+ * ::cudaGraphAddMemsetNode
1071
+ */
1072
+ template<class T>
1073
+ static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeToSymbol(
1074
+ cudaGraphNode_t *pGraphNode,
1075
+ cudaGraph_t graph,
1076
+ const cudaGraphNode_t *pDependencies,
1077
+ size_t numDependencies,
1078
+ const T &symbol,
1079
+ const void* src,
1080
+ size_t count,
1081
+ size_t offset,
1082
+ enum cudaMemcpyKind kind)
1083
+ {
1084
+ return ::cudaGraphAddMemcpyNodeToSymbol(pGraphNode, graph, pDependencies, numDependencies, (const void*)&symbol, src, count, offset, kind);
1085
+ }
1086
+
1087
+ /**
1088
+ * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph
1089
+ *
1090
+ * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with
1091
+ * \p numDependencies dependencies specified via \p pDependencies.
1092
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed
1093
+ * at the root of the graph. \p pDependencies may not have any duplicate entries.
1094
+ * A handle to the new node will be returned in \p pGraphNode.
1095
+ *
1096
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1097
+ * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area
1098
+ * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable
1099
+ * that resides in global or constant memory space. \p kind can be either
1100
+ * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1101
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer
1102
+ * is inferred from the pointer values. However, ::cudaMemcpyDefault is only
1103
+ * allowed on systems that support unified virtual addressing.
1104
+ *
1105
+ * Memcpy nodes have some additional restrictions with regard to managed memory if the
1106
+ * system contains at least one device which has a zero value for the device attribute
1107
+ * ::cudaDevAttrConcurrentManagedAccess.
1108
+ *
1109
+ * \param pGraphNode - Returns newly created node
1110
+ * \param graph - Graph to which to add the node
1111
+ * \param pDependencies - Dependencies of the node
1112
+ * \param numDependencies - Number of dependencies
1113
+ * \param dst - Destination memory address
1114
+ * \param symbol - Device symbol address
1115
+ * \param count - Size in bytes to copy
1116
+ * \param offset - Offset from start of symbol in bytes
1117
+ * \param kind - Type of transfer
1118
+ *
1119
+ * \return
1120
+ * ::cudaSuccess,
1121
+ * ::cudaErrorInvalidValue
1122
+ * \note_graph_thread_safety
1123
+ * \notefnerr
1124
+ * \note_init_rt
1125
+ * \note_callback
1126
+ *
1127
+ * \sa
1128
+ * ::cudaMemcpyFromSymbol,
1129
+ * ::cudaGraphAddMemcpyNode,
1130
+ * ::cudaGraphAddMemcpyNodeToSymbol,
1131
+ * ::cudaGraphMemcpyNodeGetParams,
1132
+ * ::cudaGraphMemcpyNodeSetParams,
1133
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1134
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1135
+ * ::cudaGraphCreate,
1136
+ * ::cudaGraphDestroyNode,
1137
+ * ::cudaGraphAddChildGraphNode,
1138
+ * ::cudaGraphAddEmptyNode,
1139
+ * ::cudaGraphAddKernelNode,
1140
+ * ::cudaGraphAddHostNode,
1141
+ * ::cudaGraphAddMemsetNode
1142
+ */
1143
+ template<class T>
1144
+ static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeFromSymbol(
1145
+ cudaGraphNode_t* pGraphNode,
1146
+ cudaGraph_t graph,
1147
+ const cudaGraphNode_t* pDependencies,
1148
+ size_t numDependencies,
1149
+ void* dst,
1150
+ const T &symbol,
1151
+ size_t count,
1152
+ size_t offset,
1153
+ enum cudaMemcpyKind kind)
1154
+ {
1155
+ return ::cudaGraphAddMemcpyNodeFromSymbol(pGraphNode, graph, pDependencies, numDependencies, dst, (const void*)&symbol, count, offset, kind);
1156
+ }
1157
+
1158
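+ /* Usage sketch (illustrative only): adding a root to-symbol memcpy node to a freshly
+  * created graph through the templated overload above; `c_params` is a hypothetical
+  * __constant__ symbol.
+  */
+ #if 0 /* example only -- not compiled */
+ __constant__ double c_params[4];
+
+ static void buildParamGraph(const double *hostParams)
+ {
+     cudaGraph_t graph;
+     cudaGraphNode_t copyNode;
+     cudaGraphCreate(&graph, 0);
+     cudaGraphAddMemcpyNodeToSymbol(&copyNode, graph, NULL, 0,
+                                    c_params, hostParams, 4 * sizeof(double), 0,
+                                    cudaMemcpyHostToDevice);
+     /* ... instantiate and launch the graph ... */
+     cudaGraphDestroy(graph);
+ }
+ #endif
+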
+ /**
1159
+ * \brief Sets a memcpy node's parameters to copy to a symbol on the device
1160
+ *
1161
+ * Sets the parameters of memcpy node \p node to the copy described by the provided parameters.
1162
+ *
1163
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1164
+ * pointed to by \p src to the memory area pointed to by \p offset bytes from the start
1165
+ * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that
1166
+ * resides in global or constant memory space. \p kind can be either
1167
+ * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1168
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of
1169
+ * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault
1170
+ * is only allowed on systems that support unified virtual addressing.
1171
+ *
1172
+ * \param node - Node to set the parameters for
1173
+ * \param symbol - Device symbol address
1174
+ * \param src - Source memory address
1175
+ * \param count - Size in bytes to copy
1176
+ * \param offset - Offset from start of symbol in bytes
1177
+ * \param kind - Type of transfer
1178
+ *
1179
+ * \return
1180
+ * ::cudaSuccess,
1181
+ * ::cudaErrorInvalidValue
1182
+ * \note_graph_thread_safety
1183
+ * \notefnerr
1184
+ * \note_init_rt
1185
+ * \note_callback
1186
+ *
1187
+ * \sa
1188
+ * ::cudaMemcpyToSymbol,
1189
+ * ::cudaGraphMemcpyNodeSetParams,
1190
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1191
+ * ::cudaGraphAddMemcpyNode,
1192
+ * ::cudaGraphMemcpyNodeGetParams
1193
+ */
1194
+ template<class T>
1195
+ static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsToSymbol(
1196
+ cudaGraphNode_t node,
1197
+ const T &symbol,
1198
+ const void* src,
1199
+ size_t count,
1200
+ size_t offset,
1201
+ enum cudaMemcpyKind kind)
1202
+ {
1203
+ return ::cudaGraphMemcpyNodeSetParamsToSymbol(node, (const void*)&symbol, src, count, offset, kind);
1204
+ }
1205
+
1206
+ /**
1207
+ * \brief Sets a memcpy node's parameters to copy from a symbol on the device
1208
+ *
1209
+ * Sets the parameters of memcpy node \p node to the copy described by the provided parameters.
1210
+ *
1211
+ * When the graph is launched, the node will copy \p count bytes from the memory area
1212
+ * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area
1213
+ * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable
1214
+ * that resides in global or constant memory space. \p kind can be either
1215
+ * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault.
1216
+ * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer
1217
+ * is inferred from the pointer values. However, ::cudaMemcpyDefault is only
1218
+ * allowed on systems that support unified virtual addressing.
1219
+ *
1220
+ * \param node - Node to set the parameters for
1221
+ * \param dst - Destination memory address
1222
+ * \param symbol - Device symbol address
1223
+ * \param count - Size in bytes to copy
1224
+ * \param offset - Offset from start of symbol in bytes
1225
+ * \param kind - Type of transfer
1226
+ *
1227
+ * \return
1228
+ * ::cudaSuccess,
1229
+ * ::cudaErrorInvalidValue
1230
+ * \note_graph_thread_safety
1231
+ * \notefnerr
1232
+ * \note_init_rt
1233
+ * \note_callback
1234
+ *
1235
+ * \sa
1236
+ * ::cudaMemcpyFromSymbol,
1237
+ * ::cudaGraphMemcpyNodeSetParams,
1238
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1239
+ * ::cudaGraphAddMemcpyNode,
1240
+ * ::cudaGraphMemcpyNodeGetParams
1241
+ */
1242
+ template<class T>
1243
+ static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsFromSymbol(
1244
+ cudaGraphNode_t node,
1245
+ void* dst,
1246
+ const T &symbol,
1247
+ size_t count,
1248
+ size_t offset,
1249
+ enum cudaMemcpyKind kind)
1250
+ {
1251
+ return ::cudaGraphMemcpyNodeSetParamsFromSymbol(node, dst, (const void*)&symbol, count, offset, kind);
1252
+ }
1253
+
1254
+ /**
1255
+ * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device
1256
+ *
1257
+ * Updates the work represented by \p node in \p hGraphExec as though \p node had
1258
+ * contained the given params at instantiation. \p node must remain in the graph which was
1259
+ * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored.
1260
+ *
1261
+ * \p src and \p symbol must be allocated from the same contexts as the original source and
1262
+ * destination memory. The instantiation-time memory operands must be 1-dimensional.
1263
+ * Zero-length operations are not supported.
1264
+ *
1265
+ * The modifications only affect future launches of \p hGraphExec. Already enqueued
1266
+ * or running launches of \p hGraphExec are not affected by this call. \p node is also
1267
+ * not modified by this call.
1268
+ *
1269
+ * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or
1270
+ * the original memory operands are multidimensional.
1271
+ *
1272
+ * \param hGraphExec - The executable graph in which to set the specified node
1273
+ * \param node - Memcpy node from the graph which was used to instantiate graphExec
1274
+ * \param symbol - Device symbol address
1275
+ * \param src - Source memory address
1276
+ * \param count - Size in bytes to copy
1277
+ * \param offset - Offset from start of symbol in bytes
1278
+ * \param kind - Type of transfer
1279
+ *
1280
+ * \return
1281
+ * ::cudaSuccess,
1282
+ * ::cudaErrorInvalidValue
1283
+ * \note_graph_thread_safety
1284
+ * \notefnerr
1285
+ * \note_init_rt
1286
+ * \note_callback
1287
+ *
1288
+ * \sa
1289
+ * ::cudaGraphAddMemcpyNode,
1290
+ * ::cudaGraphAddMemcpyNodeToSymbol,
1291
+ * ::cudaGraphMemcpyNodeSetParams,
1292
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
1293
+ * ::cudaGraphInstantiate,
1294
+ * ::cudaGraphExecMemcpyNodeSetParams,
1295
+ * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol,
1296
+ * ::cudaGraphExecKernelNodeSetParams,
1297
+ * ::cudaGraphExecMemsetNodeSetParams,
1298
+ * ::cudaGraphExecHostNodeSetParams
1299
+ */
1300
+ template<class T>
1301
+ static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsToSymbol(
1302
+ cudaGraphExec_t hGraphExec,
1303
+ cudaGraphNode_t node,
1304
+ const T &symbol,
1305
+ const void* src,
1306
+ size_t count,
1307
+ size_t offset,
1308
+ enum cudaMemcpyKind kind)
1309
+ {
1310
+ return ::cudaGraphExecMemcpyNodeSetParamsToSymbol(hGraphExec, node, (const void*)&symbol, src, count, offset, kind);
1311
+ }
1312
+
1313
+ /**
1314
+ * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device
1315
+ *
1316
+ * Updates the work represented by \p node in \p hGraphExec as though \p node had
1317
+ * contained the given params at instantiation. \p node must remain in the graph which was
1318
+ * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored.
1319
+ *
1320
+ * \p symbol and \p dst must be allocated from the same contexts as the original source and
1321
+ * destination memory. The instantiation-time memory operands must be 1-dimensional.
1322
+ * Zero-length operations are not supported.
1323
+ *
1324
+ * The modifications only affect future launches of \p hGraphExec. Already enqueued
1325
+ * or running launches of \p hGraphExec are not affected by this call. \p node is also
1326
+ * not modified by this call.
1327
+ *
1328
+ * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or
1329
+ * the original memory operands are multidimensional.
1330
+ *
1331
+ * \param hGraphExec - The executable graph in which to set the specified node
1332
+ * \param node - Memcpy node from the graph which was used to instantiate graphExec
1333
+ * \param dst - Destination memory address
1334
+ * \param symbol - Device symbol address
1335
+ * \param count - Size in bytes to copy
1336
+ * \param offset - Offset from start of symbol in bytes
1337
+ * \param kind - Type of transfer
1338
+ *
1339
+ * \return
1340
+ * ::cudaSuccess,
1341
+ * ::cudaErrorInvalidValue
1342
+ * \note_graph_thread_safety
1343
+ * \notefnerr
1344
+ * \note_init_rt
1345
+ * \note_callback
1346
+ *
1347
+ * \sa
1348
+ * ::cudaGraphAddMemcpyNode,
1349
+ * ::cudaGraphAddMemcpyNodeFromSymbol,
1350
+ * ::cudaGraphMemcpyNodeSetParams,
1351
+ * ::cudaGraphMemcpyNodeSetParamsFromSymbol,
1352
+ * ::cudaGraphInstantiate,
1353
+ * ::cudaGraphExecMemcpyNodeSetParams,
1354
+ * ::cudaGraphExecMemcpyNodeSetParamsToSymbol,
1355
+ * ::cudaGraphExecKernelNodeSetParams,
1356
+ * ::cudaGraphExecMemsetNodeSetParams,
1357
+ * ::cudaGraphExecHostNodeSetParams
1358
+ */
1359
+ template<class T>
1360
+ static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsFromSymbol(
1361
+ cudaGraphExec_t hGraphExec,
1362
+ cudaGraphNode_t node,
1363
+ void* dst,
1364
+ const T &symbol,
1365
+ size_t count,
1366
+ size_t offset,
1367
+ enum cudaMemcpyKind kind)
1368
+ {
1369
+ return ::cudaGraphExecMemcpyNodeSetParamsFromSymbol(hGraphExec, node, dst, (const void*)&symbol, count, offset, kind);
1370
+ }
1371
+
1372
+ // convenience function to avoid source breakage in c++ code
1373
+ static __inline__ __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out)
1374
+ {
1375
+ cudaGraphExecUpdateResultInfo resultInfo;
1376
+ cudaError_t status = cudaGraphExecUpdate(hGraphExec, hGraph, &resultInfo);
1377
+ if (hErrorNode_out) {
1378
+ *hErrorNode_out = resultInfo.errorNode;
1379
+ }
1380
+ if (updateResult_out) {
1381
+ *updateResult_out = resultInfo.result;
1382
+ }
1383
+ return status;
1384
+ }
1385
+
1386
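+ /* Usage sketch (illustrative only): calling the legacy-signature wrapper above, which
+  * forwards to the ::cudaGraphExecUpdateResultInfo based C API.
+  */
+ #if 0 /* example only -- not compiled */
+ static void tryGraphUpdate(cudaGraphExec_t exec, cudaGraph_t newGraph)
+ {
+     cudaGraphNode_t errorNode = NULL;
+     cudaGraphExecUpdateResult result;
+     cudaError_t status = cudaGraphExecUpdate(exec, newGraph, &errorNode, &result);
+     if (status != cudaSuccess) {
+         /* `result` and `errorNode` describe why the in-place update was rejected;
+          * the usual recovery is to instantiate `newGraph` into a new executable graph. */
+     }
+ }
+ #endif
+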
+ #if __cplusplus >= 201103
1387
+
1388
+ /**
1389
+ * \brief Creates a user object by wrapping a C++ object
1390
+ *
1391
+ * Creates a user object that wraps \p objectToWrap. When the user object's
+ * reference count reaches zero, \p objectToWrap is destroyed by calling the
+ * C++ \p delete operator on it.
1392
+ *
1393
+ * \param object_out - Location to return the user object handle
1394
+ * \param objectToWrap - This becomes the \p ptr argument to ::cudaUserObjectCreate. A
1395
+ * lambda will be passed for the \p destroy argument, which calls
1396
+ * delete on this object pointer.
1397
+ * \param initialRefcount - The initial refcount to create the object with, typically 1. The
1398
+ * initial references are owned by the calling thread.
1399
+ * \param flags - Currently it is required to pass cudaUserObjectNoDestructorSync,
1400
+ * which is the only defined flag. This indicates that the destroy
1401
+ * callback cannot be waited on by any CUDA API. Users requiring
1402
+ * synchronization of the callback should signal its completion
1403
+ * manually.
1404
+ *
1405
+ * \return
1406
+ * ::cudaSuccess,
1407
+ * ::cudaErrorInvalidValue
1408
+ *
1409
+ * \sa
1410
+ * ::cudaUserObjectCreate
1411
+ */
1412
+ template<class T>
1413
+ static __inline__ __host__ cudaError_t cudaUserObjectCreate(
1414
+ cudaUserObject_t *object_out,
1415
+ T *objectToWrap,
1416
+ unsigned int initialRefcount,
1417
+ unsigned int flags)
1418
+ {
1419
+ return ::cudaUserObjectCreate(
1420
+ object_out,
1421
+ objectToWrap,
1422
+ [](void *vpObj) { delete reinterpret_cast<T *>(vpObj); },
1423
+ initialRefcount,
1424
+ flags);
1425
+ }
1426
+
1427
+ template<class T>
1428
+ static __inline__ __host__ cudaError_t cudaUserObjectCreate(
1429
+ cudaUserObject_t *object_out,
1430
+ T *objectToWrap,
1431
+ unsigned int initialRefcount,
1432
+ cudaUserObjectFlags flags)
1433
+ {
1434
+ return cudaUserObjectCreate(object_out, objectToWrap, initialRefcount, (unsigned int)flags);
1435
+ }
1436
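+
+ /* Usage sketch (illustrative only): tying the lifetime of a heap-allocated C++ object
+  * to a graph via the wrapping overload above; `Cache` is a hypothetical type.
+  */
+ #if 0 /* example only -- not compiled */
+ struct Cache { int payload[256]; };
+
+ static void attachCacheToGraph(cudaGraph_t graph)
+ {
+     cudaUserObject_t obj;
+     Cache *cache = new Cache();
+     /* The wrapper installs a destroy callback that deletes `cache` when the refcount hits zero. */
+     cudaUserObjectCreate(&obj, cache, 1, cudaUserObjectNoDestructorSync);
+     /* Move the initial reference to the graph; the graph now owns `cache`. */
+     cudaGraphRetainUserObject(graph, obj, 1, cudaGraphUserObjectMove);
+ }
+ #endif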
+
1437
+ #endif
1438
+
1439
+ /**
1440
+ * \brief \hl Finds the address associated with a CUDA symbol
1441
+ *
1442
+ * Returns in \p *devPtr the address of symbol \p symbol on the device.
1443
+ * \p symbol can either be a variable that resides in global or constant memory space.
1444
+ * If \p symbol cannot be found, or if \p symbol is not declared
1445
+ * in the global or constant memory space, \p *devPtr is unchanged and the error
1446
+ * ::cudaErrorInvalidSymbol is returned.
1447
+ *
1448
+ * \param devPtr - Return device pointer associated with symbol
1449
+ * \param symbol - Device symbol reference
1450
+ *
1451
+ * \return
1452
+ * ::cudaSuccess,
1453
+ * ::cudaErrorInvalidSymbol,
1454
+ * ::cudaErrorNoKernelImageForDevice
1455
+ * \notefnerr
1456
+ * \note_init_rt
1457
+ * \note_callback
1458
+ *
1459
+ * \sa \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)",
1460
+ * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)"
1461
+ */
1462
+ template<class T>
1463
+ static __inline__ __host__ cudaError_t cudaGetSymbolAddress(
1464
+ void **devPtr,
1465
+ const T &symbol
1466
+ )
1467
+ {
1468
+ return ::cudaGetSymbolAddress(devPtr, (const void*)&symbol);
1469
+ }
1470
+
1471
+ /**
1472
+ * \brief \hl Finds the size of the object associated with a CUDA symbol
1473
+ *
1474
+ * Returns in \p *size the size of symbol \p symbol. \p symbol must be a
1475
+ * variable that resides in global or constant memory space.
1476
+ * If \p symbol cannot be found, or if \p symbol is not declared
1477
+ * in global or constant memory space, \p *size is unchanged and the error
1478
+ * ::cudaErrorInvalidSymbol is returned.
1479
+ *
1480
+ * \param size - Size of object associated with symbol
1481
+ * \param symbol - Device symbol reference
1482
+ *
1483
+ * \return
1484
+ * ::cudaSuccess,
1485
+ * ::cudaErrorInvalidSymbol,
1486
+ * ::cudaErrorNoKernelImageForDevice
1487
+ * \notefnerr
1488
+ * \note_init_rt
1489
+ * \note_callback
1490
+ *
1491
+ * \sa \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)",
1492
+ * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)"
1493
+ */
1494
+ template<class T>
1495
+ static __inline__ __host__ cudaError_t cudaGetSymbolSize(
1496
+ size_t *size,
1497
+ const T &symbol
1498
+ )
1499
+ {
1500
+ return ::cudaGetSymbolSize(size, (const void*)&symbol);
1501
+ }
1502
+
1503
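+ /* Usage sketch (illustrative only): querying the address and size of a hypothetical
+  * __device__ symbol through the reference-based overloads above.
+  */
+ #if 0 /* example only -- not compiled */
+ __device__ float g_table[128];
+
+ static void inspectSymbol(void)
+ {
+     void  *addr = NULL;
+     size_t size = 0;
+     cudaGetSymbolAddress(&addr, g_table); /* device pointer usable with cudaMemcpy */
+     cudaGetSymbolSize(&size, g_table);    /* size == 128 * sizeof(float) */
+ }
+ #endif
+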
+ /**
1504
+ * \brief \hl Sets the preferred cache configuration for a device function
1505
+ *
1506
+ * On devices where the L1 cache and shared memory use the same hardware
1507
+ * resources, this sets through \p cacheConfig the preferred cache configuration
1508
+ * for the function specified via \p func. This is only a preference. The
1509
+ * runtime will use the requested configuration if possible, but it is free to
1510
+ * choose a different configuration if required to execute \p func.
1511
+ *
1512
+ * \p func must be a pointer to a function that executes on the device.
1513
+ * The parameter specified by \p func must be declared as a \p __global__
1514
+ * function. If the specified function does not exist,
1515
+ * then ::cudaErrorInvalidDeviceFunction is returned.
1516
+ *
1517
+ * This setting does nothing on devices where the size of the L1 cache and
1518
+ * shared memory are fixed.
1519
+ *
1520
+ * Launching a kernel with a different preference than the most recent
1521
+ * preference setting may insert a device-side synchronization point.
1522
+ *
1523
+ * The supported cache configurations are:
1524
+ * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)
1525
+ * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache
1526
+ * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory
1527
+ *
1528
+ * \param func - device function pointer
1529
+ * \param cacheConfig - Requested cache configuration
1530
+ *
1531
+ * \return
1532
+ * ::cudaSuccess,
1533
+ * ::cudaErrorInvalidDeviceFunction
1534
+ * \notefnerr
1535
+ * \note_init_rt
1536
+ * \note_callback
1537
+ *
1538
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
1539
+ * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)",
1540
+ * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)",
1541
+ * ::cudaSetDoubleForDevice,
1542
+ * ::cudaSetDoubleForHost,
1543
+ * ::cudaThreadGetCacheConfig,
1544
+ * ::cudaThreadSetCacheConfig
1545
+ */
1546
+ template<class T>
1547
+ static __inline__ __host__ cudaError_t cudaFuncSetCacheConfig(
1548
+ T *func,
1549
+ enum cudaFuncCache cacheConfig
1550
+ )
1551
+ {
1552
+ return ::cudaFuncSetCacheConfig((const void*)func, cacheConfig);
1553
+ }
1554
+
1555
+ template<class T>
1556
+ static __inline__ __host__ cudaError_t cudaFuncSetSharedMemConfig(
1557
+ T *func,
1558
+ enum cudaSharedMemConfig config
1559
+ )
1560
+ {
1561
+ return ::cudaFuncSetSharedMemConfig((const void*)func, config);
1562
+ }
1563
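+
+ /* Usage sketch (illustrative only): requesting a larger L1 cache for a hypothetical
+  * kernel through the typed overload above.
+  */
+ #if 0 /* example only -- not compiled */
+ __global__ void stencilKernel(float *out, const float *in);
+
+ static void configureStencil(void)
+ {
+     cudaFuncSetCacheConfig(stencilKernel, cudaFuncCachePreferL1);
+ }
+ #endif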
+
1564
+ #endif // __CUDACC__
1565
+
1566
+ /**
1567
+ * \brief Returns occupancy for a device function
1568
+ *
1569
+ * Returns in \p *numBlocks the maximum number of active blocks per
1570
+ * streaming multiprocessor for the device function.
1571
+ *
1572
+ * \param numBlocks - Returned occupancy
1573
+ * \param func - Kernel function for which occupancy is calulated
1574
+ * \param blockSize - Block size the kernel is intended to be launched with
1575
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
1576
+ *
1577
+ * \return
1578
+ * ::cudaSuccess,
1579
+ * ::cudaErrorInvalidDevice,
1580
+ * ::cudaErrorInvalidDeviceFunction,
1581
+ * ::cudaErrorInvalidValue,
1582
+ * ::cudaErrorUnknown,
1583
+ * \notefnerr
1584
+ * \note_init_rt
1585
+ * \note_callback
1586
+ *
1587
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1588
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1589
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1590
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1591
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1592
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1593
+ */
1594
+ template<class T>
1595
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor(
1596
+ int *numBlocks,
1597
+ T func,
1598
+ int blockSize,
1599
+ size_t dynamicSMemSize)
1600
+ {
1601
+ return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, cudaOccupancyDefault);
1602
+ }
1603
+
1604
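+ /* Usage sketch (illustrative only): converting the returned block count into a
+  * theoretical occupancy fraction for a hypothetical kernel `myKernel`.
+  */
+ #if 0 /* example only -- not compiled */
+ __global__ void myKernel(float *data);
+
+ static void reportOccupancy(int blockSize)
+ {
+     int numBlocks = 0;
+     int device = 0;
+     struct cudaDeviceProp prop;
+     cudaGetDevice(&device);
+     cudaGetDeviceProperties(&prop, device);
+     cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, myKernel, blockSize, 0);
+     double occupancy = (double)(numBlocks * blockSize) / prop.maxThreadsPerMultiProcessor;
+     (void)occupancy; /* e.g. printf("theoretical occupancy: %.2f\n", occupancy); */
+ }
+ #endif
+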
+ /**
1605
+ * \brief Returns occupancy for a device function with the specified flags
1606
+ *
1607
+ * Returns in \p *numBlocks the maximum number of active blocks per
1608
+ * streaming multiprocessor for the device function.
1609
+ *
1610
+ * The \p flags parameter controls how special cases are handled. Valid flags include:
1611
+ *
1612
+ * - ::cudaOccupancyDefault: keeps the default behavior as
1613
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1614
+ *
1615
+ * - ::cudaOccupancyDisableCachingOverride: suppresses the default behavior
1616
+ * on platforms where global caching affects occupancy. On such platforms, if caching
1617
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
1618
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
1619
+ * Setting this flag causes the occupancy calculator to return 0 in such cases.
1620
+ * More information can be found about this feature in the "Unified L1/Texture Cache"
1621
+ * section of the Maxwell tuning guide.
1622
+ *
1623
+ * \param numBlocks - Returned occupancy
1624
+ * \param func - Kernel function for which occupancy is calculated
1625
+ * \param blockSize - Block size the kernel is intended to be launched with
1626
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
1627
+ * \param flags - Requested behavior for the occupancy calculator
1628
+ *
1629
+ * \return
1630
+ * ::cudaSuccess,
1631
+ * ::cudaErrorInvalidDevice,
1632
+ * ::cudaErrorInvalidDeviceFunction,
1633
+ * ::cudaErrorInvalidValue,
1634
+ * ::cudaErrorUnknown,
1635
+ * \notefnerr
1636
+ * \note_init_rt
1637
+ * \note_callback
1638
+ *
1639
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1640
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1641
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1642
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1643
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1644
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1645
+ */
1646
+ template<class T>
1647
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
1648
+ int *numBlocks,
1649
+ T func,
1650
+ int blockSize,
1651
+ size_t dynamicSMemSize,
1652
+ unsigned int flags)
1653
+ {
1654
+ return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, flags);
1655
+ }
1656
+
1657
+ /**
1658
+ * Helper functor for cudaOccupancyMaxPotentialBlockSize
1659
+ */
1660
+ class __cudaOccupancyB2DHelper {
1661
+ size_t n;
1662
+ public:
1663
+ inline __host__ CUDART_DEVICE __cudaOccupancyB2DHelper(size_t n_) : n(n_) {}
1664
+ inline __host__ CUDART_DEVICE size_t operator()(int)
1665
+ {
1666
+ return n;
1667
+ }
1668
+ };
1669
+
1670
+ /**
1671
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function
1672
+ *
1673
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
1674
+ * block size pair that achieves the best potential occupancy
1675
+ * (i.e. the maximum number of active warps with the smallest number
1676
+ * of blocks).
1677
+ *
1678
+ * The \p flags parameter controls how special cases are handled. Valid flags include:
1679
+ *
1680
+ * - ::cudaOccupancyDefault: keeps the default behavior as
1681
+ * ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1682
+ *
1683
+ * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior
1684
+ * on platforms where global caching affects occupancy. On such platforms, if caching
1685
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
1686
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
1687
+ * Setting this flag causes the occupancy calculator to return 0 in such cases.
1688
+ * More information can be found about this feature in the "Unified L1/Texture Cache"
1689
+ * section of the Maxwell tuning guide.
1690
+ *
1691
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
1692
+ * \param blockSize - Returned block size
1693
+ * \param func - Device function symbol
1694
+ * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block
1695
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
1696
+ * \param flags - Requested behavior for the occupancy calculator
1697
+ *
1698
+ * \return
1699
+ * ::cudaSuccess,
1700
+ * ::cudaErrorInvalidDevice,
1701
+ * ::cudaErrorInvalidDeviceFunction,
1702
+ * ::cudaErrorInvalidValue,
1703
+ * ::cudaErrorUnknown,
1704
+ * \notefnerr
1705
+ * \note_init_rt
1706
+ * \note_callback
1707
+ *
1708
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1709
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1710
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1711
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1712
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1713
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1714
+ */
1715
+
1716
+ template<typename UnaryFunction, class T>
1717
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(
1718
+ int *minGridSize,
1719
+ int *blockSize,
1720
+ T func,
1721
+ UnaryFunction blockSizeToDynamicSMemSize,
1722
+ int blockSizeLimit = 0,
1723
+ unsigned int flags = 0)
1724
+ {
1725
+ cudaError_t status;
1726
+
1727
+ // Device and function properties
1728
+ int device;
1729
+ struct cudaFuncAttributes attr;
1730
+
1731
+ // Limits
1732
+ int maxThreadsPerMultiProcessor;
1733
+ int warpSize;
1734
+ int devMaxThreadsPerBlock;
1735
+ int multiProcessorCount;
1736
+ int funcMaxThreadsPerBlock;
1737
+ int occupancyLimit;
1738
+ int granularity;
1739
+
1740
+ // Recorded maximum
1741
+ int maxBlockSize = 0;
1742
+ int numBlocks = 0;
1743
+ int maxOccupancy = 0;
1744
+
1745
+ // Temporary
1746
+ int blockSizeToTryAligned;
1747
+ int blockSizeToTry;
1748
+ int blockSizeLimitAligned;
1749
+ int occupancyInBlocks;
1750
+ int occupancyInThreads;
1751
+ size_t dynamicSMemSize;
1752
+
1753
+ ///////////////////////////
1754
+ // Check user input
1755
+ ///////////////////////////
1756
+
1757
+ if (!minGridSize || !blockSize || !func) {
1758
+ return cudaErrorInvalidValue;
1759
+ }
1760
+
1761
+ //////////////////////////////////////////////
1762
+ // Obtain device and function properties
1763
+ //////////////////////////////////////////////
1764
+
1765
+ status = ::cudaGetDevice(&device);
1766
+ if (status != cudaSuccess) {
1767
+ return status;
1768
+ }
1769
+
1770
+ status = cudaDeviceGetAttribute(
1771
+ &maxThreadsPerMultiProcessor,
1772
+ cudaDevAttrMaxThreadsPerMultiProcessor,
1773
+ device);
1774
+ if (status != cudaSuccess) {
1775
+ return status;
1776
+ }
1777
+
1778
+ status = cudaDeviceGetAttribute(
1779
+ &warpSize,
1780
+ cudaDevAttrWarpSize,
1781
+ device);
1782
+ if (status != cudaSuccess) {
1783
+ return status;
1784
+ }
1785
+
1786
+ status = cudaDeviceGetAttribute(
1787
+ &devMaxThreadsPerBlock,
1788
+ cudaDevAttrMaxThreadsPerBlock,
1789
+ device);
1790
+ if (status != cudaSuccess) {
1791
+ return status;
1792
+ }
1793
+
1794
+ status = cudaDeviceGetAttribute(
1795
+ &multiProcessorCount,
1796
+ cudaDevAttrMultiProcessorCount,
1797
+ device);
1798
+ if (status != cudaSuccess) {
1799
+ return status;
1800
+ }
1801
+
1802
+ status = cudaFuncGetAttributes(&attr, func);
1803
+ if (status != cudaSuccess) {
1804
+ return status;
1805
+ }
1806
+
1807
+ funcMaxThreadsPerBlock = attr.maxThreadsPerBlock;
1808
+
1809
+ /////////////////////////////////////////////////////////////////////////////////
1810
+ // Try each block size, and pick the block size with maximum occupancy
1811
+ /////////////////////////////////////////////////////////////////////////////////
1812
+
1813
+ occupancyLimit = maxThreadsPerMultiProcessor;
1814
+ granularity = warpSize;
1815
+
1816
+ if (blockSizeLimit == 0) {
1817
+ blockSizeLimit = devMaxThreadsPerBlock;
1818
+ }
1819
+
1820
+ if (devMaxThreadsPerBlock < blockSizeLimit) {
1821
+ blockSizeLimit = devMaxThreadsPerBlock;
1822
+ }
1823
+
1824
+ if (funcMaxThreadsPerBlock < blockSizeLimit) {
1825
+ blockSizeLimit = funcMaxThreadsPerBlock;
1826
+ }
1827
+
1828
+ blockSizeLimitAligned = ((blockSizeLimit + (granularity - 1)) / granularity) * granularity;
1829
+
1830
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1831
+ // This is needed for the first iteration, because
1832
+ // blockSizeLimitAligned could be greater than blockSizeLimit
1833
+ //
1834
+ if (blockSizeLimit < blockSizeToTryAligned) {
1835
+ blockSizeToTry = blockSizeLimit;
1836
+ } else {
1837
+ blockSizeToTry = blockSizeToTryAligned;
1838
+ }
1839
+
1840
+ dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry);
1841
+
1842
+ status = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
1843
+ &occupancyInBlocks,
1844
+ func,
1845
+ blockSizeToTry,
1846
+ dynamicSMemSize,
1847
+ flags);
1848
+
1849
+ if (status != cudaSuccess) {
1850
+ return status;
1851
+ }
1852
+
1853
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1854
+
1855
+ if (occupancyInThreads > maxOccupancy) {
1856
+ maxBlockSize = blockSizeToTry;
1857
+ numBlocks = occupancyInBlocks;
1858
+ maxOccupancy = occupancyInThreads;
1859
+ }
1860
+
1861
+ // Early out if we have reached the maximum
1862
+ //
1863
+ if (occupancyLimit == maxOccupancy) {
1864
+ break;
1865
+ }
1866
+ }
1867
+
1868
+ ///////////////////////////
1869
+ // Return best available
1870
+ ///////////////////////////
1871
+
1872
+ // Suggested min grid size to achieve a full machine launch
1873
+ //
1874
+ *minGridSize = numBlocks * multiProcessorCount;
1875
+ *blockSize = maxBlockSize;
1876
+
1877
+ return status;
1878
+ }
1879
+
1880
+ /**
1881
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function
1882
+ *
1883
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
1884
+ * block size pair that achieves the best potential occupancy
1885
+ * (i.e. the maximum number of active warps with the smallest number
1886
+ * of blocks).
1887
+ *
1888
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
1889
+ * \param blockSize - Returned block size
1890
+ * \param func - Device function symbol
1891
+ * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block
1892
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
1893
+ *
1894
+ * \return
1895
+ * ::cudaSuccess,
1896
+ * ::cudaErrorInvalidDevice,
1897
+ * ::cudaErrorInvalidDeviceFunction,
1898
+ * ::cudaErrorInvalidValue,
1899
+ * ::cudaErrorUnknown,
1900
+ * \notefnerr
1901
+ * \note_init_rt
1902
+ * \note_callback
1903
+ *
1904
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1905
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1906
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1907
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1908
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1909
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1910
+ */
1911
+
1912
+ template<typename UnaryFunction, class T>
1913
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem(
1914
+ int *minGridSize,
1915
+ int *blockSize,
1916
+ T func,
1917
+ UnaryFunction blockSizeToDynamicSMemSize,
1918
+ int blockSizeLimit = 0)
1919
+ {
1920
+ return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, blockSizeLimit, cudaOccupancyDefault);
1921
+ }
1922
+
1923
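+ /* Usage sketch (illustrative only): a functor mapping block size to the dynamic shared
+  * memory a hypothetical reduction kernel needs (one float per thread).
+  */
+ #if 0 /* example only -- not compiled */
+ __global__ void reduceKernel(const float *in, float *out);
+
+ struct SmemPerBlock {
+     size_t operator()(int blockSize) const { return blockSize * sizeof(float); }
+ };
+
+ static void pickReduceBlockSize(void)
+ {
+     int minGridSize = 0;
+     int blockSize = 0;
+     cudaOccupancyMaxPotentialBlockSizeVariableSMem(&minGridSize, &blockSize,
+                                                    reduceKernel, SmemPerBlock());
+ }
+ #endif
+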
+ /**
1924
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function
1925
+ *
1926
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
1927
+ * block size pair that achieves the best potential occupancy
1928
+ * (i.e. the maximum number of active warps with the smallest number
1929
+ * of blocks).
1930
+ *
1931
+ * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the
1932
+ * amount of per-block dynamic shared memory changes with different
1933
+ * block sizes.
1934
+ *
1935
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
1936
+ * \param blockSize - Returned block size
1937
+ * \param func - Device function symbol
1938
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
1939
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
1940
+ *
1941
+ * \return
1942
+ * ::cudaSuccess,
1943
+ * ::cudaErrorInvalidDevice,
1944
+ * ::cudaErrorInvalidDeviceFunction,
1945
+ * ::cudaErrorInvalidValue,
1946
+ * ::cudaErrorUnknown,
1947
+ * \notefnerr
1948
+ * \note_init_rt
1949
+ * \note_callback
1950
+ *
1951
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1952
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1953
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1954
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1955
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1956
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
1957
+ */
1958
+ template<class T>
1959
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSize(
1960
+ int *minGridSize,
1961
+ int *blockSize,
1962
+ T func,
1963
+ size_t dynamicSMemSize = 0,
1964
+ int blockSizeLimit = 0)
1965
+ {
1966
+ return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, cudaOccupancyDefault);
1967
+ }
1968
+
1969
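+ /* Usage sketch (illustrative only): sizing a 1-D launch of a hypothetical kernel
+  * `scaleKernel` over `n` elements with the suggested block size.
+  */
+ #if 0 /* example only -- not compiled */
+ __global__ void scaleKernel(float *data, int n);
+
+ static void launchWithBestBlockSize(float *d_data, int n)
+ {
+     int minGridSize = 0;
+     int blockSize = 0;
+     cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scaleKernel);
+     int gridSize = (n + blockSize - 1) / blockSize; /* round up so every element is covered */
+     scaleKernel<<<gridSize, blockSize>>>(d_data, n);
+ }
+ #endif
+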
+ /**
1970
+ * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM.
1971
+ *
1972
+ * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM.
1973
+ *
1974
+ * \param dynamicSmemSize - Returned maximum dynamic shared memory
1975
+ * \param func - Kernel function for which occupancy is calculated
1976
+ * \param numBlocks - Number of blocks to fit on SM
1977
+ * \param blockSize - Size of the block
1978
+ *
1979
+ * \return
1980
+ * ::cudaSuccess,
1981
+ * ::cudaErrorInvalidDevice,
1982
+ * ::cudaErrorInvalidDeviceFunction,
1983
+ * ::cudaErrorInvalidValue,
1984
+ * ::cudaErrorUnknown,
1985
+ * \notefnerr
1986
+ * \note_init_rt
1987
+ * \note_callback
1988
+ *
1989
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
1990
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags
1991
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
1992
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
1993
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
1994
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
1995
+ */
1996
+ template<class T>
1997
+ static __inline__ __host__ cudaError_t cudaOccupancyAvailableDynamicSMemPerBlock(
1998
+ size_t *dynamicSmemSize,
1999
+ T func,
2000
+ int numBlocks,
2001
+ int blockSize)
2002
+ {
2003
+ return ::cudaOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, (const void*)func, numBlocks, blockSize);
2004
+ }
2005
+
2006
+ /**
2007
+ * \brief Returns grid and block size that achieves maximum potential occupancy for a device function with the specified flags
2008
+ *
2009
+ * Returns in \p *minGridSize and \p *blockSize a suggested grid /
2010
+ * block size pair that achieves the best potential occupancy
2011
+ * (i.e. the maximum number of active warps with the smallest number
2012
+ * of blocks).
2013
+ *
2014
+ * The \p flags parameter controls how special cases are handled. Valid flags include:
2015
+ *
2016
+ * - ::cudaOccupancyDefault: keeps the default behavior as
2017
+ * ::cudaOccupancyMaxPotentialBlockSize
2018
+ *
2019
+ * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior
2020
+ * on platforms where global caching affects occupancy. On such platforms, if caching
2021
+ * is enabled, but per-block SM resource usage would result in zero occupancy, the
2022
+ * occupancy calculator will calculate the occupancy as if caching is disabled.
2023
+ * Setting this flag causes the occupancy calculator to return 0 in such cases.
2024
+ * More information can be found about this feature in the "Unified L1/Texture Cache"
2025
+ * section of the Maxwell tuning guide.
2026
+ *
2027
+ * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the
2028
+ * amount of per-block dynamic shared memory changes with different
2029
+ * block sizes.
2030
+ *
2031
+ * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy
2032
+ * \param blockSize - Returned block size
2033
+ * \param func - Device function symbol
2034
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
2035
+ * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit.
2036
+ * \param flags - Requested behavior for the occupancy calculator
2037
+ *
2038
+ * \return
2039
+ * ::cudaSuccess,
2040
+ * ::cudaErrorInvalidDevice,
2041
+ * ::cudaErrorInvalidDeviceFunction,
2042
+ * ::cudaErrorInvalidValue,
2043
+ * ::cudaErrorUnknown,
2044
+ * \notefnerr
2045
+ * \note_init_rt
2046
+ * \note_callback
2047
+ *
2048
+ * \sa ::cudaOccupancyMaxPotentialBlockSize
2049
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
2050
+ * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
2051
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem
2052
+ * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags
2053
+ * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock
2054
+ */
2055
+ template<class T>
2056
+ static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags(
2057
+ int *minGridSize,
2058
+ int *blockSize,
2059
+ T func,
2060
+ size_t dynamicSMemSize = 0,
2061
+ int blockSizeLimit = 0,
2062
+ unsigned int flags = 0)
2063
+ {
2064
+ return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, flags);
2065
+ }
2066
+
2067
+ /**
2068
+ * \brief Given the kernel function (\p func) and launch configuration
2069
+ * (\p config), return the maximum cluster size in \p *clusterSize.
2070
+ *
2071
+ * The cluster dimensions in \p config are ignored. If \p func has a required
2072
+ * cluster size set (see ::cudaFuncGetAttributes), \p *clusterSize will reflect
2073
+ * the required cluster size.
2074
+ *
2075
+ * By default this function will always return a value that's portable on
2076
+ * future hardware. A higher value may be returned if the kernel function
2077
+ * allows non-portable cluster sizes.
2078
+ *
2079
+ * This function will respect the compile time launch bounds.
2080
+ *
2081
+ * \param clusterSize - Returned maximum cluster size that can be launched
2082
+ * for the given kernel function and launch configuration
2083
+ * \param func - Kernel function for which maximum cluster
2084
+ * size is calculated
2085
+ * \param config - Launch configuration for the given kernel function
2086
+ *
2087
+ * \return
2088
+ * ::cudaSuccess,
2089
+ * ::cudaErrorInvalidDeviceFunction,
2090
+ * ::cudaErrorInvalidValue,
2091
+ * ::cudaErrorUnknown,
2092
+ * \notefnerr
2093
+ * \note_init_rt
2094
+ * \note_callback
2095
+ *
2096
+ * \sa
2097
+ * ::cudaFuncGetAttributes
2098
+ */
2099
+ template<class T>
2100
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxPotentialClusterSize(
2101
+ int *clusterSize,
2102
+ T *func,
2103
+ const cudaLaunchConfig_t *config)
2104
+ {
2105
+ return ::cudaOccupancyMaxPotentialClusterSize(clusterSize, (const void*)func, config);
2106
+ }
2107
+
2108
+ /**
2109
+ * \brief Given the kernel function (\p func) and launch configuration
2110
+ * (\p config), return the maximum number of clusters that could co-exist
2111
+ * on the target device in \p *numClusters.
2112
+ *
2113
+ * If the function has a required cluster size already set (see
2114
+ * ::cudaFuncGetAttributes), the cluster size from config must either be
2115
+ * unspecified or match the required size.
2116
+ * Without required sizes, the cluster size must be specified in config,
2117
+ * else the function will return an error.
2118
+ *
2119
+ * Note that various attributes of the kernel function may affect occupancy
2120
+ * calculation. The runtime environment may affect how the hardware schedules
2121
+ * the clusters, so the calculated occupancy is not guaranteed to be achievable.
2122
+ *
2123
+ * \param numClusters - Returned maximum number of clusters that
2124
+ * could co-exist on the target device
2125
+ * \param func - Kernel function for which maximum number
2126
+ * of clusters are calculated
2127
+ * \param config - Launch configuration for the given kernel function
2128
+ *
2129
+ * \return
2130
+ * ::cudaSuccess,
2131
+ * ::cudaErrorInvalidDeviceFunction,
2132
+ * ::cudaErrorInvalidValue,
2133
+ * ::cudaErrorInvalidClusterSize,
2134
+ * ::cudaErrorUnknown,
2135
+ * \notefnerr
2136
+ * \note_init_rt
2137
+ * \note_callback
2138
+ *
2139
+ * \sa
2140
+ * ::cudaFuncGetAttributes
2141
+ */
2142
+ template<class T>
2143
+ static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveClusters(
2144
+ int *numClusters,
2145
+ T *func,
2146
+ const cudaLaunchConfig_t *config)
2147
+ {
2148
+ return ::cudaOccupancyMaxActiveClusters(numClusters, (const void*)func, config);
2149
+ }
2150
+
2151
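+ /* Usage sketch (illustrative only): querying the largest launchable cluster and the
+  * number of co-resident clusters for a hypothetical kernel and launch shape
+  * (cluster launches require a device with compute capability 9.0 or higher).
+  */
+ #if 0 /* example only -- not compiled */
+ __global__ void clusterKernel(float *data);
+
+ static void queryClusterLimits(void)
+ {
+     cudaLaunchConfig_t config = {};
+     config.gridDim  = dim3(128);
+     config.blockDim = dim3(256);
+
+     int maxClusterSize = 0;
+     cudaOccupancyMaxPotentialClusterSize(&maxClusterSize, clusterKernel, &config);
+
+     /* For cudaOccupancyMaxActiveClusters the cluster shape must be given explicitly. */
+     cudaLaunchAttribute attr;
+     attr.id = cudaLaunchAttributeClusterDimension;
+     attr.val.clusterDim.x = 2;
+     attr.val.clusterDim.y = 1;
+     attr.val.clusterDim.z = 1;
+     config.attrs = &attr;
+     config.numAttrs = 1;
+
+     int numClusters = 0;
+     cudaOccupancyMaxActiveClusters(&numClusters, clusterKernel, &config);
+ }
+ #endif
+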
+ #if defined __CUDACC__
2152
+
2153
+ /**
2154
+ * \brief \hl Find out attributes for a given function
2155
+ *
2156
+ * This function obtains the attributes of a function specified via \p entry.
2157
+ * The parameter \p entry must be a pointer to a function that executes
2158
+ * on the device. The parameter specified by \p entry must be declared as a \p __global__
2159
+ * function. The fetched attributes are placed in \p attr. If the specified
2160
+ * function does not exist, then ::cudaErrorInvalidDeviceFunction is returned.
2161
+ *
2162
+ * Note that some function attributes such as
2163
+ * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock"
2164
+ * may vary based on the device that is currently being used.
2165
+ *
2166
+ * \param attr - Return pointer to function's attributes
2167
+ * \param entry - Function to get attributes of
2168
+ *
2169
+ * \return
2170
+ * ::cudaSuccess,
2171
+ * ::cudaErrorInvalidDeviceFunction
2172
+ * \notefnerr
2173
+ * \note_init_rt
2174
+ * \note_callback
2175
+ *
2176
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
2177
+ * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)",
2178
+ * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)",
2179
+ * ::cudaSetDoubleForDevice,
2180
+ * ::cudaSetDoubleForHost
2181
+ */
2182
+ template<class T>
2183
+ static __inline__ __host__ cudaError_t cudaFuncGetAttributes(
2184
+ struct cudaFuncAttributes *attr,
2185
+ T *entry
2186
+ )
2187
+ {
2188
+ return ::cudaFuncGetAttributes(attr, (const void*)entry);
2189
+ }
2190
+
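/* Usage sketch (not part of the original header): reading a kernel's limits
 * through the templated overload above. `queryKernel` is an illustrative
 * kernel name. */
#include <cstdio>

__global__ void queryKernel(int *out) { out[threadIdx.x] = threadIdx.x; }

static void printKernelLimits(void)
{
    cudaFuncAttributes attr;
    if (cudaFuncGetAttributes(&attr, queryKernel) == cudaSuccess)
    {
        printf("maxThreadsPerBlock = %d\n",  attr.maxThreadsPerBlock);
        printf("numRegs            = %d\n",  attr.numRegs);
        printf("sharedSizeBytes    = %zu\n", attr.sharedSizeBytes);
    }
}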
2191
+ /**
2192
+ * \brief \hl Set attributes for a given function
2193
+ *
2194
+ * This function sets the attributes of a function specified via \p entry.
2195
+ * The parameter \p entry must be a pointer to a function that executes
2196
+ * on the device. The parameter specified by \p entry must be declared as a \p __global__
2197
+ * function. The attribute specified by \p attr is set to the value specified by \p value.
2198
+ * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned.
2199
+ * If the specified attribute cannot be written, or if the value is incorrect,
2200
+ * then ::cudaErrorInvalidValue is returned.
2201
+ *
2202
+ * Valid values for \p attr are:
2203
+ * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute ::sharedSizeBytes
2204
+ * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture.
2205
+ * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources,
2206
+ * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor.
2207
+ * This is only a hint, and the driver can choose a different ratio if required to execute the function.
2208
+ * - ::cudaFuncAttributeRequiredClusterWidth: The required cluster width in
2209
+ * blocks. The width, height, and depth values must either all be 0 or all be
2210
+ * positive. The validity of the cluster dimensions is checked at launch time.
2211
+ * If the value is set during compile time, it cannot be set at runtime.
2212
+ * Setting it at runtime will return cudaErrorNotPermitted.
2213
+ * - ::cudaFuncAttributeRequiredClusterHeight: The required cluster height in
2214
+ * blocks. The width, height, and depth values must either all be 0 or all be
2215
+ * positive. The validity of the cluster dimensions is checked at launch time.
2216
+ * If the value is set during compile time, it cannot be set at runtime.
2217
+ * Setting it at runtime will return cudaErrorNotPermitted.
2218
+ * - ::cudaFuncAttributeRequiredClusterDepth: The required cluster depth in
2219
+ * blocks. The width, height, and depth values must either all be 0 or all be
2220
+ * positive. The validity of the cluster dimensions is checked at launch time.
2221
+ * If the value is set during compile time, it cannot be set at runtime.
2222
+ * Setting it at runtime will return cudaErrorNotPermitted.
2223
+ * - ::cudaFuncAttributeClusterSchedulingPolicyPreference: The block
2224
+ * scheduling policy of a function. The value type is cudaClusterSchedulingPolicy.
2225
+ *
2226
+ * \param entry - Function whose attribute is to be set
2227
+ * \param attr - Attribute to set
2228
+ * \param value - Value to set
2229
+ *
2230
+ * \return
2231
+ * ::cudaSuccess,
2232
+ * ::cudaErrorInvalidDeviceFunction,
2233
+ * ::cudaErrorInvalidValue
2234
+ * \notefnerr
2235
+ * \note_init_rt
2236
+ * \note_callback
2237
+ *
2238
+ * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)",
2239
+ * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)",
2240
+ * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)",
2241
+ * ::cudaSetDoubleForDevice,
2242
+ * ::cudaSetDoubleForHost
2243
+ */
2244
+ template<class T>
2245
+ static __inline__ __host__ cudaError_t cudaFuncSetAttribute(
2246
+ T *entry,
2247
+ enum cudaFuncAttribute attr,
2248
+ int value
2249
+ )
2250
+ {
2251
+ return ::cudaFuncSetAttribute((const void*)entry, attr, value);
2252
+ }
2253
+
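/* Usage sketch (not part of the original header): opting a kernel into a
 * larger dynamic shared-memory allocation before launch. The 64 KiB figure is
 * an illustrative assumption and must not exceed the device's
 * cudaDevAttrMaxSharedMemoryPerBlockOptin limit. */
__global__ void bigSmemKernel(float *out)
{
    extern __shared__ float tile[];            /* dynamically sized shared memory */
    tile[threadIdx.x] = (float)threadIdx.x;
    out[threadIdx.x]  = tile[threadIdx.x];
}

static void launchWithLargeSharedMemory(float *d_out, cudaStream_t stream)
{
    const int smemBytes = 64 * 1024;           /* assumed opt-in size in bytes */
    cudaFuncSetAttribute(bigSmemKernel,
                         cudaFuncAttributeMaxDynamicSharedMemorySize, smemBytes);
    bigSmemKernel<<<1, 256, smemBytes, stream>>>(d_out);
}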
2254
+ /**
2255
+ * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr
2256
+ *
2257
+ * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr.
2258
+ *
2259
+ * \param kernelPtr - Returns the device kernel
2260
+ * \param entryFuncAddr - Address of device entry function to search kernel for
2261
+ *
2262
+ * \return
2263
+ * ::cudaSuccess
2264
+ *
2265
+ * \sa
2266
+ * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr) "cudaGetKernel (C API)"
2267
+ */
2268
+ template<class T>
2269
+ static __inline__ __host__ cudaError_t cudaGetKernel(
2270
+ cudaKernel_t *kernelPtr,
2271
+ const T *entryFuncAddr
2272
+ )
2273
+ {
2274
+ return ::cudaGetKernel(kernelPtr, (const void *)entryFuncAddr);
2275
+ }
2276
+
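/* Usage sketch (not part of the original header): obtaining a cudaKernel_t
 * handle for a __global__ entry function. `handleKernel` is an illustrative
 * kernel name; what the handle is subsequently used for is omitted here. */
__global__ void handleKernel(int *out) { out[threadIdx.x] = threadIdx.x; }

static void getKernelHandle(void)
{
    cudaKernel_t handle;
    /* The templated overload above forwards the entry-function address to the
     * C API; the returned handle can then be passed to runtime APIs that
     * accept kernel handles. */
    cudaError_t err = cudaGetKernel(&handle, handleKernel);
    (void)err;
    (void)handle;
}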
2277
+ #endif /* __CUDACC__ */
2278
+
2279
+ /** @} */ /* END CUDART_HIGHLEVEL */
2280
+
2281
+ #endif /* __cplusplus && !__CUDACC_RTC__ */
2282
+
2283
+ #if !defined(__CUDACC_RTC__)
2284
+ #if defined(__GNUC__)
2285
+ #if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
2286
+ #pragma GCC diagnostic pop
2287
+ #endif
2288
+ #elif defined(_MSC_VER)
2289
+ #pragma warning(pop)
2290
+ #endif
2291
+ #endif
2292
+
2293
+ #undef __CUDA_DEPRECATED
2294
+
2295
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__)
2296
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
2297
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__
2298
+ #endif
2299
+
2300
+ #endif /* !__CUDA_RUNTIME_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h ADDED
@@ -0,0 +1,118 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__DEVICE_LAUNCH_PARAMETERS_H__)
51
+ #define __DEVICE_LAUNCH_PARAMETERS_H__
52
+
53
+ #include "vector_types.h"
54
+
55
+ #if !defined(__STORAGE__)
56
+
57
+ #if defined(__CUDACC_RTC__)
58
+ #define __STORAGE__ \
59
+ extern const __device__
60
+ #else /* !__CUDACC_RTC__ */
61
+ #define __STORAGE__ \
62
+ extern const
63
+ #endif /* __CUDACC_RTC__ */
64
+
65
+ #endif /* __STORAGE__ */
66
+
67
+ #if defined(__cplusplus)
68
+ extern "C" {
69
+ #endif /* __cplusplus */
70
+
71
+ uint3 __device_builtin__ __STORAGE__ threadIdx;
72
+ uint3 __device_builtin__ __STORAGE__ blockIdx;
73
+ dim3 __device_builtin__ __STORAGE__ blockDim;
74
+ dim3 __device_builtin__ __STORAGE__ gridDim;
75
+ int __device_builtin__ __STORAGE__ warpSize;
76
+
77
+ #undef __STORAGE__
78
+
79
+ #if defined(__cplusplus)
80
+ }
81
+ #endif /* __cplusplus */
82
+
83
+ #if !defined(__cudaGet_threadIdx)
84
+
85
+ #define __cudaGet_threadIdx() \
86
+ threadIdx
87
+
88
+ #endif /* __cudaGet_threadIdx */
89
+
90
+ #if !defined(__cudaGet_blockIdx)
91
+
92
+ #define __cudaGet_blockIdx() \
93
+ blockIdx
94
+
95
+ #endif /* __cudaGet_blockIdx */
96
+
97
+ #if !defined(__cudaGet_blockDim)
98
+
99
+ #define __cudaGet_blockDim() \
100
+ blockDim
101
+
102
+ #endif /* __cudaGet_blockDim */
103
+
104
+ #if !defined(__cudaGet_gridDim)
105
+
106
+ #define __cudaGet_gridDim() \
107
+ gridDim
108
+
109
+ #endif /* __cudaGet_gridDim */
110
+
111
+ #if !defined(__cudaGet_warpSize)
112
+
113
+ #define __cudaGet_warpSize() \
114
+ warpSize
115
+
116
+ #endif /* __cudaGet_warpSize */
117
+
118
+ #endif /* !__DEVICE_LAUNCH_PARAMETERS_H__ */
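/* Usage sketch (not part of the original header): the built-in variables
 * declared above (threadIdx, blockIdx, blockDim, gridDim) combined into a
 * global thread index and a grid-stride loop. The SAXPY kernel below is an
 * illustrative example, not part of the CUDA runtime. */
__global__ void saxpy(int n, float a, const float *x, float *y)
{
    int i      = blockIdx.x * blockDim.x + threadIdx.x;   /* global thread index   */
    int stride = gridDim.x * blockDim.x;                  /* total threads in grid */
    for (; i < n; i += stride)
        y[i] = a * x[i] + y[i];
}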
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp ADDED
@@ -0,0 +1,85 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_20_ATOMIC_FUNCTIONS_HPP__)
51
+ #define __SM_20_ATOMIC_FUNCTIONS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_20_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #else /* __CUDACC_RTC__ */
56
+ #define __SM_20_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ /*******************************************************************************
62
+ * *
63
+ * *
64
+ * *
65
+ *******************************************************************************/
66
+
67
+ #include "cuda_runtime_api.h"
68
+
69
+ /*******************************************************************************
70
+ * *
71
+ * *
72
+ * *
73
+ *******************************************************************************/
74
+
75
+ __SM_20_ATOMIC_FUNCTIONS_DECL__ float atomicAdd(float *address, float val)
76
+ {
77
+ return __fAtomicAdd(address, val);
78
+ }
79
+
80
+ #endif /* __cplusplus && __CUDACC__ */
81
+
82
+ #undef __SM_20_ATOMIC_FUNCTIONS_DECL__
83
+
84
+ #endif /* !__SM_20_ATOMIC_FUNCTIONS_HPP__ */
85
+
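/* Usage sketch (not part of the original header): the float atomicAdd overload
 * defined above lets many threads accumulate into one global value. The kernel
 * below is illustrative; `sum` is assumed to point to a zero-initialized float
 * in global memory. */
__global__ void sumKernel(const float *in, float *sum, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicAdd(sum, in[i]);   /* resolves to the float overload on sm_20+ */
}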
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp ADDED
@@ -0,0 +1,221 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_20_INTRINSICS_HPP__)
51
+ #define __SM_20_INTRINSICS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_20_INTRINSICS_DECL__ __device__
55
+ #else /* __CUDACC_RTC__ */
56
+ #define __SM_20_INTRINSICS_DECL__ static __inline__ __device__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ /*******************************************************************************
62
+ * *
63
+ * *
64
+ * *
65
+ *******************************************************************************/
66
+
67
+ #include "cuda_runtime_api.h"
68
+
69
+ /*******************************************************************************
70
+ * *
71
+ * *
72
+ * *
73
+ *******************************************************************************/
74
+
75
+ __SM_20_INTRINSICS_DECL__ unsigned int ballot(bool pred)
76
+ {
77
+ return __ballot((int)pred);
78
+ }
79
+
80
+ __SM_20_INTRINSICS_DECL__ int syncthreads_count(bool pred)
81
+ {
82
+ return __syncthreads_count((int)pred);
83
+ }
84
+
85
+ __SM_20_INTRINSICS_DECL__ bool syncthreads_and(bool pred)
86
+ {
87
+ return (bool)__syncthreads_and((int)pred);
88
+ }
89
+
90
+ __SM_20_INTRINSICS_DECL__ bool syncthreads_or(bool pred)
91
+ {
92
+ return (bool)__syncthreads_or((int)pred);
93
+ }
94
+
95
+
96
+ extern "C" {
97
+ __device__ unsigned __nv_isGlobal_impl(const void *);
98
+ __device__ unsigned __nv_isShared_impl(const void *);
99
+ __device__ unsigned __nv_isConstant_impl(const void *);
100
+ __device__ unsigned __nv_isLocal_impl(const void *);
101
+ __device__ unsigned __nv_isGridConstant_impl(const void *);
102
+ }
103
+
104
+ __SM_20_INTRINSICS_DECL__ unsigned int __isGlobal(const void *ptr)
105
+ {
106
+ return __nv_isGlobal_impl(ptr);
107
+ }
108
+
109
+ __SM_20_INTRINSICS_DECL__ unsigned int __isShared(const void *ptr)
110
+ {
111
+ return __nv_isShared_impl(ptr);
112
+ }
113
+
114
+ __SM_20_INTRINSICS_DECL__ unsigned int __isConstant(const void *ptr)
115
+ {
116
+ return __nv_isConstant_impl(ptr);
117
+ }
118
+
119
+ __SM_20_INTRINSICS_DECL__ unsigned int __isLocal(const void *ptr)
120
+ {
121
+ return __nv_isLocal_impl(ptr);
122
+ }
123
+
124
+ #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
125
+ __SM_20_INTRINSICS_DECL__ unsigned int __isGridConstant(const void *ptr)
126
+ {
127
+ return __nv_isGridConstant_impl(ptr);
128
+ }
129
+ #endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */
130
+
131
+ extern "C" {
132
+ __device__ size_t __nv_cvta_generic_to_global_impl(const void *);
133
+ __device__ size_t __nv_cvta_generic_to_shared_impl(const void *);
134
+ __device__ size_t __nv_cvta_generic_to_constant_impl(const void *);
135
+ __device__ size_t __nv_cvta_generic_to_local_impl(const void *);
136
+ __device__ void * __nv_cvta_global_to_generic_impl(size_t);
137
+ __device__ void * __nv_cvta_shared_to_generic_impl(size_t);
138
+ __device__ void * __nv_cvta_constant_to_generic_impl(size_t);
139
+ __device__ void * __nv_cvta_local_to_generic_impl(size_t);
140
+ }
141
+
142
+ __SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_global(const void *p)
143
+ {
144
+ return __nv_cvta_generic_to_global_impl(p);
145
+ }
146
+
147
+ __SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_shared(const void *p)
148
+ {
149
+ return __nv_cvta_generic_to_shared_impl(p);
150
+ }
151
+
152
+ __SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_constant(const void *p)
153
+ {
154
+ return __nv_cvta_generic_to_constant_impl(p);
155
+ }
156
+
157
+ __SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_local(const void *p)
158
+ {
159
+ return __nv_cvta_generic_to_local_impl(p);
160
+ }
161
+
162
+ __SM_20_INTRINSICS_DECL__ void * __cvta_global_to_generic(size_t rawbits)
163
+ {
164
+ return __nv_cvta_global_to_generic_impl(rawbits);
165
+ }
166
+
167
+ __SM_20_INTRINSICS_DECL__ void * __cvta_shared_to_generic(size_t rawbits)
168
+ {
169
+ return __nv_cvta_shared_to_generic_impl(rawbits);
170
+ }
171
+
172
+ __SM_20_INTRINSICS_DECL__ void * __cvta_constant_to_generic(size_t rawbits)
173
+ {
174
+ return __nv_cvta_constant_to_generic_impl(rawbits);
175
+ }
176
+
177
+ __SM_20_INTRINSICS_DECL__ void * __cvta_local_to_generic(size_t rawbits)
178
+ {
179
+ return __nv_cvta_local_to_generic_impl(rawbits);
180
+ }
181
+
182
+ #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
183
+ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
184
+ #define __CVTA_PTR_64 1
185
+ #endif
186
+
187
+ __SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_grid_constant(const void *ptr)
188
+ {
189
+ #if __CVTA_PTR_64
190
+ unsigned long long ret;
191
+ asm("cvta.to.param.u64 %0, %1;" : "=l"(ret) : "l"(ptr));
192
+ #else /* !__CVTA_PTR_64 */
193
+ unsigned ret;
194
+ asm("cvta.to.param.u32 %0, %1;" : "=r"(ret) : "r"(ptr));
195
+ #endif /* __CVTA_PTR_64 */
196
+ return (size_t)ret;
197
+
198
+ }
199
+
200
+ __SM_20_INTRINSICS_DECL__ void * __cvta_grid_constant_to_generic(size_t rawbits)
201
+ {
202
+ void *ret;
203
+ #if __CVTA_PTR_64
204
+ unsigned long long in = rawbits;
205
+ asm("cvta.param.u64 %0, %1;" : "=l"(ret) : "l"(in));
206
+ #else /* !__CVTA_PTR_64 */
207
+ unsigned in = rawbits;
208
+ asm("cvta.param.u32 %0, %1;" : "=r"(ret) : "r"(in));
209
+ #endif /* __CVTA_PTR_64 */
210
+ return ret;
211
+ }
212
+ #undef __CVTA_PTR_64
213
+ #endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */
214
+
215
+
216
+ #endif /* __cplusplus && __CUDACC__ */
217
+
218
+ #undef __SM_20_INTRINSICS_DECL__
219
+
220
+ #endif /* !__SM_20_INTRINSICS_HPP__ */
221
+
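/* Usage sketch (not part of the original header): the address-space query and
 * conversion intrinsics defined above, applied to a global and a shared
 * pointer. The kernel is illustrative only. */
__global__ void addressSpaceDemo(unsigned *flags, size_t *rawShared)
{
    __shared__ int tile[32];
    int *generic = &tile[threadIdx.x % 32];          /* generic pointer into shared memory */

    flags[0]   = __isGlobal(flags);                  /* 1: global-memory pointer */
    flags[1]   = __isShared(generic);                /* 1: shared-memory pointer */
    *rawShared = __cvta_generic_to_shared(generic);  /* raw .shared address      */
}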
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h ADDED
@@ -0,0 +1,221 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_30_INTRINSICS_H__)
51
+ #define __SM_30_INTRINSICS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_30_INTRINSICS_DECL__ __device__
55
+ #elif defined(_NVHPC_CUDA)
56
+ #define __SM_30_INTRINSICS_DECL__ extern __device__ __cudart_builtin__
57
+ #else /* !__CUDACC_RTC__ */
58
+ #define __SM_30_INTRINSICS_DECL__ static __device__ __inline__
59
+ #endif /* __CUDACC_RTC__ */
60
+
61
+ #if defined(__cplusplus) && defined(__CUDACC__)
62
+
63
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
64
+
65
+ /*******************************************************************************
66
+ * *
67
+ * *
68
+ * *
69
+ *******************************************************************************/
70
+
71
+ #include "cuda_runtime_api.h"
72
+
73
+ /* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA
74
+ * C++ compiler where the macro __CUDA_ARCH__ is not defined. */
75
+ #if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
76
+ #define __DEF_IF_HOST { }
77
+ #else /* !__CUDA_ARCH__ */
78
+ #define __DEF_IF_HOST ;
79
+ #endif /* __CUDA_ARCH__ */
80
+
81
+
82
+ /*******************************************************************************
83
+ * *
84
+ * Below are declarations of SM-3.0 intrinsics which are included as *
85
+ * source (instead of being built in to the compiler) *
86
+ * *
87
+ *******************************************************************************/
88
+
89
+ #if !defined warpSize && !defined __local_warpSize
90
+ #define warpSize 32
91
+ #define __local_warpSize
92
+ #endif
93
+
94
+ #if defined(_WIN32)
95
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
96
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
97
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
98
+ #else
99
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
100
+ #endif
101
+
102
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
103
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)."
104
+ #elif defined(_NVHPC_CUDA)
105
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()."
106
+ #endif
107
+
108
+ __SM_30_INTRINSICS_DECL__ unsigned __fns(unsigned mask, unsigned base, int offset) __DEF_IF_HOST
109
+ __SM_30_INTRINSICS_DECL__ void __barrier_sync(unsigned id) __DEF_IF_HOST
110
+ __SM_30_INTRINSICS_DECL__ void __barrier_sync_count(unsigned id, unsigned cnt) __DEF_IF_HOST
111
+ __SM_30_INTRINSICS_DECL__ void __syncwarp(unsigned mask=0xFFFFFFFF) __DEF_IF_HOST
112
+ __SM_30_INTRINSICS_DECL__ int __all_sync(unsigned mask, int pred) __DEF_IF_HOST
113
+ __SM_30_INTRINSICS_DECL__ int __any_sync(unsigned mask, int pred) __DEF_IF_HOST
114
+ __SM_30_INTRINSICS_DECL__ int __uni_sync(unsigned mask, int pred) __DEF_IF_HOST
115
+ __SM_30_INTRINSICS_DECL__ unsigned __ballot_sync(unsigned mask, int pred) __DEF_IF_HOST
116
+ __SM_30_INTRINSICS_DECL__ unsigned __activemask() __DEF_IF_HOST
117
+
118
+ // Warp register exchange (shuffle) intrinsics.
119
+ // Notes:
120
+ // a) Warp size is hardcoded to 32 here, because the compiler does not know
121
+ // the "warpSize" constant at this time
122
+ // b) we cannot map the float __shfl to the int __shfl because it'll mess with
123
+ // the register number (especially if you're doing two shfls to move a double).
124
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
125
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) int __shfl(int var, int srcLane, int width=warpSize) __DEF_IF_HOST
126
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned int __shfl(unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST
127
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) int __shfl_up(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
128
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned int __shfl_up(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
129
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) int __shfl_down(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
130
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned int __shfl_down(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
131
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) int __shfl_xor(int var, int laneMask, int width=warpSize) __DEF_IF_HOST
132
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned int __shfl_xor(unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST
133
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) float __shfl(float var, int srcLane, int width=warpSize) __DEF_IF_HOST
134
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) float __shfl_up(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
135
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) float __shfl_down(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
136
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) float __shfl_xor(float var, int laneMask, int width=warpSize) __DEF_IF_HOST
137
+ #endif
138
+
139
+ __SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width=warpSize) __DEF_IF_HOST
140
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST
141
+ __SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
142
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
143
+ __SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
144
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
145
+ __SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width=warpSize) __DEF_IF_HOST
146
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST
147
+ __SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width=warpSize) __DEF_IF_HOST
148
+ __SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
149
+ __SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
150
+ __SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width=warpSize) __DEF_IF_HOST
151
+
152
+ // 64-bits SHFL
153
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
154
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long long __shfl(unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
155
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long long __shfl(long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
156
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long long __shfl_up(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
157
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
158
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long long __shfl_down(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
159
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
160
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long long __shfl_xor(long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
161
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
162
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) double __shfl(double var, int srcLane, int width=warpSize) __DEF_IF_HOST
163
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) double __shfl_up(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
164
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) double __shfl_down(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
165
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) double __shfl_xor(double var, int laneMask, int width=warpSize) __DEF_IF_HOST
166
+ #endif
167
+
168
+ __SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
169
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
170
+ __SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
171
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
172
+ __SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
173
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
174
+ __SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
175
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
176
+ __SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width=warpSize) __DEF_IF_HOST
177
+ __SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
178
+ __SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
179
+ __SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width=warpSize) __DEF_IF_HOST
180
+
181
+ // long needs some help to choose between 32-bits and 64-bits
182
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
183
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long __shfl(long var, int srcLane, int width=warpSize) __DEF_IF_HOST
184
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long __shfl(unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST
185
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long __shfl_up(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
186
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long __shfl_up(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
187
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long __shfl_down(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
188
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long __shfl_down(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
189
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long __shfl_xor(long var, int laneMask, int width=warpSize) __DEF_IF_HOST
190
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long __shfl_xor(unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST
191
+ #endif
192
+
193
+ __SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width=warpSize) __DEF_IF_HOST
194
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST
195
+ __SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
196
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
197
+ __SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
198
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
199
+ __SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width=warpSize) __DEF_IF_HOST
200
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST
201
+
202
+ #undef __DEPRECATED__
203
+ #undef __WSB_DEPRECATION_MESSAGE
204
+
205
+ #if defined(__local_warpSize)
206
+ #undef warpSize
207
+ #undef __local_warpSize
208
+ #endif
209
+
210
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */
211
+
212
+ #endif /* __cplusplus && __CUDACC__ */
213
+
214
+ #undef __DEF_IF_HOST
215
+ #undef __SM_30_INTRINSICS_DECL__
216
+
217
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
218
+ #include "sm_30_intrinsics.hpp"
219
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
220
+
221
+ #endif /* !__SM_30_INTRINSICS_H__ */
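/* Usage sketch (not part of the original header): a warp-wide sum built from
 * the *_sync shuffle intrinsics declared above (the non-sync variants are
 * deprecated on cc70 and later). Assumes the full 32-lane warp participates. */
static __inline__ __device__ float warpReduceSum(float val)
{
    const unsigned mask = 0xffffffffu;             /* all lanes participate     */
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(mask, val, offset);
    return val;                                    /* lane 0 holds the warp sum */
}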
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp ADDED
@@ -0,0 +1,604 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_30_INTRINSICS_HPP__)
51
+ #define __SM_30_INTRINSICS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_30_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_30_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ // In here are intrinsics which are built in to the compiler. These may be
72
+ // referenced by intrinsic implementations from this file.
73
+ extern "C"
74
+ {
75
+ }
76
+
77
+ /*******************************************************************************
78
+ * *
79
+ * Below are implementations of SM-3.0 intrinsics which are included as *
80
+ * source (instead of being built in to the compiler) *
81
+ * *
82
+ *******************************************************************************/
83
+
84
+ #if !defined warpSize && !defined __local_warpSize
85
+ #define warpSize 32
86
+ #define __local_warpSize
87
+ #endif
88
+
89
+ __SM_30_INTRINSICS_DECL__
90
+ unsigned __fns(unsigned mask, unsigned base, int offset) {
91
+ extern __device__ __device_builtin__ unsigned int __nvvm_fns(unsigned int mask, unsigned int base, int offset);
92
+ return __nvvm_fns(mask, base, offset);
93
+ }
94
+
95
+ __SM_30_INTRINSICS_DECL__
96
+ void __barrier_sync(unsigned id) {
97
+ extern __device__ __device_builtin__ void __nvvm_barrier_sync(unsigned id);
98
+ return __nvvm_barrier_sync(id);
99
+ }
100
+
101
+ __SM_30_INTRINSICS_DECL__
102
+ void __barrier_sync_count(unsigned id, unsigned cnt) {
103
+ extern __device__ __device_builtin__ void __nvvm_barrier_sync_cnt(unsigned id, unsigned cnt);
104
+ return __nvvm_barrier_sync_cnt(id, cnt);
105
+ }
106
+
107
+ __SM_30_INTRINSICS_DECL__
108
+ void __syncwarp(unsigned mask) {
109
+ extern __device__ __device_builtin__ void __nvvm_bar_warp_sync(unsigned mask);
110
+ return __nvvm_bar_warp_sync(mask);
111
+ }
112
+
113
+ __SM_30_INTRINSICS_DECL__
114
+ int __all_sync(unsigned mask, int pred) {
115
+ extern __device__ __device_builtin__ int __nvvm_vote_all_sync(unsigned int mask, int pred);
116
+ return __nvvm_vote_all_sync(mask, pred);
117
+ }
118
+
119
+ __SM_30_INTRINSICS_DECL__
120
+ int __any_sync(unsigned mask, int pred) {
121
+ extern __device__ __device_builtin__ int __nvvm_vote_any_sync(unsigned int mask, int pred);
122
+ return __nvvm_vote_any_sync(mask, pred);
123
+ }
124
+
125
+ __SM_30_INTRINSICS_DECL__
126
+ int __uni_sync(unsigned mask, int pred) {
127
+ extern __device__ __device_builtin__ int __nvvm_vote_uni_sync(unsigned int mask, int pred);
128
+ return __nvvm_vote_uni_sync(mask, pred);
129
+ }
130
+
131
+ __SM_30_INTRINSICS_DECL__
132
+ unsigned __ballot_sync(unsigned mask, int pred) {
133
+ extern __device__ __device_builtin__ unsigned int __nvvm_vote_ballot_sync(unsigned int mask, int pred);
134
+ return __nvvm_vote_ballot_sync(mask, pred);
135
+ }
136
+
137
+ __SM_30_INTRINSICS_DECL__
138
+ unsigned __activemask() {
139
+ unsigned ret;
140
+ asm volatile ("activemask.b32 %0;" : "=r"(ret));
141
+ return ret;
142
+ }
143
+
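/* Usage sketch (not part of the original header): combining __activemask() and
 * __ballot_sync() as implemented above to count how many currently active
 * lanes satisfy a predicate. Illustrative helper only. */
static __inline__ __device__ int countActivePredicate(int pred)
{
    unsigned active = __activemask();               /* lanes currently converged here  */
    unsigned votes  = __ballot_sync(active, pred);  /* one bit per lane with pred != 0 */
    return __popc(votes);                           /* population count of set bits    */
}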
144
+ // These are removed starting with compute_70 and onwards
145
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
146
+
147
+ __SM_30_INTRINSICS_DECL__ int __shfl(int var, int srcLane, int width) {
148
+ int ret;
149
+ int c = ((warpSize-width) << 8) | 0x1f;
150
+ asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(srcLane), "r"(c));
151
+ return ret;
152
+ }
153
+
154
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl(unsigned int var, int srcLane, int width) {
155
+ return (unsigned int) __shfl((int)var, srcLane, width);
156
+ }
157
+
158
+ __SM_30_INTRINSICS_DECL__ int __shfl_up(int var, unsigned int delta, int width) {
159
+ int ret;
160
+ int c = (warpSize-width) << 8;
161
+ asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c));
162
+ return ret;
163
+ }
164
+
165
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_up(unsigned int var, unsigned int delta, int width) {
166
+ return (unsigned int) __shfl_up((int)var, delta, width);
167
+ }
168
+
169
+ __SM_30_INTRINSICS_DECL__ int __shfl_down(int var, unsigned int delta, int width) {
170
+ int ret;
171
+ int c = ((warpSize-width) << 8) | 0x1f;
172
+ asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c));
173
+ return ret;
174
+ }
175
+
176
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_down(unsigned int var, unsigned int delta, int width) {
177
+ return (unsigned int) __shfl_down((int)var, delta, width);
178
+ }
179
+
180
+ __SM_30_INTRINSICS_DECL__ int __shfl_xor(int var, int laneMask, int width) {
181
+ int ret;
182
+ int c = ((warpSize-width) << 8) | 0x1f;
183
+ asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(laneMask), "r"(c));
184
+ return ret;
185
+ }
186
+
187
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor(unsigned int var, int laneMask, int width) {
188
+ return (unsigned int) __shfl_xor((int)var, laneMask, width);
189
+ }
190
+
191
+ __SM_30_INTRINSICS_DECL__ float __shfl(float var, int srcLane, int width) {
192
+ float ret;
193
+ int c;
194
+ c = ((warpSize-width) << 8) | 0x1f;
195
+ asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(srcLane), "r"(c));
196
+ return ret;
197
+ }
198
+
199
+ __SM_30_INTRINSICS_DECL__ float __shfl_up(float var, unsigned int delta, int width) {
200
+ float ret;
201
+ int c;
202
+ c = (warpSize-width) << 8;
203
+ asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c));
204
+ return ret;
205
+ }
206
+
207
+ __SM_30_INTRINSICS_DECL__ float __shfl_down(float var, unsigned int delta, int width) {
208
+ float ret;
209
+ int c;
210
+ c = ((warpSize-width) << 8) | 0x1f;
211
+ asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c));
212
+ return ret;
213
+ }
214
+
215
+ __SM_30_INTRINSICS_DECL__ float __shfl_xor(float var, int laneMask, int width) {
216
+ float ret;
217
+ int c;
218
+ c = ((warpSize-width) << 8) | 0x1f;
219
+ asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(laneMask), "r"(c));
220
+ return ret;
221
+ }
222
+
223
+ // 64-bits SHFL
224
+
225
+ __SM_30_INTRINSICS_DECL__ long long __shfl(long long var, int srcLane, int width) {
226
+ int lo, hi;
227
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
228
+ hi = __shfl(hi, srcLane, width);
229
+ lo = __shfl(lo, srcLane, width);
230
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
231
+ return var;
232
+ }
233
+
234
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl(unsigned long long var, int srcLane, int width) {
235
+ return (unsigned long long) __shfl((long long) var, srcLane, width);
236
+ }
237
+
238
+ __SM_30_INTRINSICS_DECL__ long long __shfl_up(long long var, unsigned int delta, int width) {
239
+ int lo, hi;
240
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
241
+ hi = __shfl_up(hi, delta, width);
242
+ lo = __shfl_up(lo, delta, width);
243
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
244
+ return var;
245
+ }
246
+
247
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width) {
248
+ return (unsigned long long) __shfl_up((long long) var, delta, width);
249
+ }
250
+
251
+ __SM_30_INTRINSICS_DECL__ long long __shfl_down(long long var, unsigned int delta, int width) {
252
+ int lo, hi;
253
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
254
+ hi = __shfl_down(hi, delta, width);
255
+ lo = __shfl_down(lo, delta, width);
256
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
257
+ return var;
258
+ }
259
+
260
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width) {
261
+ return (unsigned long long) __shfl_down((long long) var, delta, width);
262
+ }
263
+
264
+ __SM_30_INTRINSICS_DECL__ long long __shfl_xor(long long var, int laneMask, int width) {
265
+ int lo, hi;
266
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
267
+ hi = __shfl_xor(hi, laneMask, width);
268
+ lo = __shfl_xor(lo, laneMask, width);
269
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
270
+ return var;
271
+ }
272
+
273
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width) {
274
+ return (unsigned long long) __shfl_xor((long long) var, laneMask, width);
275
+ }
276
+
277
+ __SM_30_INTRINSICS_DECL__ double __shfl(double var, int srcLane, int width) {
278
+ unsigned lo, hi;
279
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
280
+ hi = __shfl(hi, srcLane, width);
281
+ lo = __shfl(lo, srcLane, width);
282
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
283
+ return var;
284
+ }
285
+
286
+ __SM_30_INTRINSICS_DECL__ double __shfl_up(double var, unsigned int delta, int width) {
287
+ unsigned lo, hi;
288
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
289
+ hi = __shfl_up(hi, delta, width);
290
+ lo = __shfl_up(lo, delta, width);
291
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
292
+ return var;
293
+ }
294
+
295
+ __SM_30_INTRINSICS_DECL__ double __shfl_down(double var, unsigned int delta, int width) {
296
+ unsigned lo, hi;
297
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
298
+ hi = __shfl_down(hi, delta, width);
299
+ lo = __shfl_down(lo, delta, width);
300
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
301
+ return var;
302
+ }
303
+
304
+ __SM_30_INTRINSICS_DECL__ double __shfl_xor(double var, int laneMask, int width) {
305
+ unsigned lo, hi;
306
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
307
+ hi = __shfl_xor(hi, laneMask, width);
308
+ lo = __shfl_xor(lo, laneMask, width);
309
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
310
+ return var;
311
+ }
312
+
313
+ __SM_30_INTRINSICS_DECL__ long __shfl(long var, int srcLane, int width) {
314
+ return (sizeof(long) == sizeof(long long)) ?
315
+ __shfl((long long) var, srcLane, width) :
316
+ __shfl((int) var, srcLane, width);
317
+ }
318
+
319
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl(unsigned long var, int srcLane, int width) {
320
+ return (sizeof(long) == sizeof(long long)) ?
321
+ __shfl((unsigned long long) var, srcLane, width) :
322
+ __shfl((unsigned int) var, srcLane, width);
323
+ }
324
+
325
+ __SM_30_INTRINSICS_DECL__ long __shfl_up(long var, unsigned int delta, int width) {
326
+ return (sizeof(long) == sizeof(long long)) ?
327
+ __shfl_up((long long) var, delta, width) :
328
+ __shfl_up((int) var, delta, width);
329
+ }
330
+
331
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_up(unsigned long var, unsigned int delta, int width) {
332
+ return (sizeof(long) == sizeof(long long)) ?
333
+ __shfl_up((unsigned long long) var, delta, width) :
334
+ __shfl_up((unsigned int) var, delta, width);
335
+ }
336
+
337
+ __SM_30_INTRINSICS_DECL__ long __shfl_down(long var, unsigned int delta, int width) {
338
+ return (sizeof(long) == sizeof(long long)) ?
339
+ __shfl_down((long long) var, delta, width) :
340
+ __shfl_down((int) var, delta, width);
341
+ }
342
+
343
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_down(unsigned long var, unsigned int delta, int width) {
344
+ return (sizeof(long) == sizeof(long long)) ?
345
+ __shfl_down((unsigned long long) var, delta, width) :
346
+ __shfl_down((unsigned int) var, delta, width);
347
+ }
348
+
349
+ __SM_30_INTRINSICS_DECL__ long __shfl_xor(long var, int laneMask, int width) {
350
+ return (sizeof(long) == sizeof(long long)) ?
351
+ __shfl_xor((long long) var, laneMask, width) :
352
+ __shfl_xor((int) var, laneMask, width);
353
+ }
354
+
355
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor(unsigned long var, int laneMask, int width) {
356
+ return (sizeof(long) == sizeof(long long)) ?
357
+ __shfl_xor((unsigned long long) var, laneMask, width) :
358
+ __shfl_xor((unsigned int) var, laneMask, width);
359
+ }
360
+
361
+ #endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
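// Editorial note: the block above supplies the legacy, maskless __shfl*
// variants, which are compiled only for pre-Volta device targets
// (__CUDA_ARCH__ < 700) or host/NVHPC passes; new code should prefer the
// *_sync forms defined below, which take an explicit participation mask.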
362
+
363
+ // Warp register exchange (shuffle) intrinsics.
364
+ // Notes:
365
+ // a) Warp size is hardcoded to 32 here, because the compiler does not know
366
+ // the "warpSize" constant at this time
367
+ // b) we cannot map the float __shfl to the int __shfl because it'll mess with
368
+ // the register number (especially if you're doing two shfls to move a double).
369
+ __SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width) {
370
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
371
+ int ret;
372
+ int c = ((warpSize-width) << 8) | 0x1f;
373
+ ret = __nvvm_shfl_idx_sync(mask, var, srcLane, c);
374
+ return ret;
375
+ }
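// Editorial note on the control word built above: in the PTX shfl.sync
// encoding, bits [4:0] of the "c" operand carry the source-lane clamp
// (0x1f for the idx/down/bfly modes, 0 for up) and bits [12:8] carry the
// segment mask (warpSize - width). For example, width == 16 gives
// c = 0x101f, confining the shuffle to independent 16-lane segments.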
376
+
377
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width) {
378
+ return (unsigned int) __shfl_sync(mask, (int)var, srcLane, width);
379
+ }
380
+
381
+ __SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width) {
382
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
383
+ int ret;
384
+ int c = (warpSize-width) << 8;
385
+ ret = __nvvm_shfl_up_sync(mask, var, delta, c);
386
+ return ret;
387
+ }
388
+
389
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width) {
390
+ return (unsigned int) __shfl_up_sync(mask, (int)var, delta, width);
391
+ }
392
+
393
+ __SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width) {
394
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
395
+ int ret;
396
+ int c = ((warpSize-width) << 8) | 0x1f;
397
+ ret = __nvvm_shfl_down_sync(mask, var, delta, c);
398
+ return ret;
399
+ }
400
+
401
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width) {
402
+ return (unsigned int) __shfl_down_sync(mask, (int)var, delta, width);
403
+ }
404
+
405
+ __SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width) {
406
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
407
+ int ret;
408
+ int c = ((warpSize-width) << 8) | 0x1f;
409
+ ret = __nvvm_shfl_bfly_sync(mask, var, laneMask, c);
410
+ return ret;
411
+ }
412
+
413
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width) {
414
+ return (unsigned int) __shfl_xor_sync(mask, (int)var, laneMask, width);
415
+ }
416
+
417
+ __SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width) {
418
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
419
+ int ret;
420
+ int c;
421
+ c = ((warpSize-width) << 8) | 0x1f;
422
+ ret = __nvvm_shfl_idx_sync(mask, __float_as_int(var), srcLane, c);
423
+ return __int_as_float(ret);
424
+ }
425
+
426
+ __SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width) {
427
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
428
+ int ret;
429
+ int c;
430
+ c = (warpSize-width) << 8;
431
+ ret = __nvvm_shfl_up_sync(mask, __float_as_int(var), delta, c);
432
+ return __int_as_float(ret);
433
+ }
434
+
435
+ __SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width) {
436
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
437
+ int ret;
438
+ int c;
439
+ c = ((warpSize-width) << 8) | 0x1f;
440
+ ret = __nvvm_shfl_down_sync(mask, __float_as_int(var), delta, c);
441
+ return __int_as_float(ret);
442
+ }
443
+
444
+ __SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width) {
445
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
446
+ int ret;
447
+ int c;
448
+ c = ((warpSize-width) << 8) | 0x1f;
449
+ ret = __nvvm_shfl_bfly_sync(mask, __float_as_int(var), laneMask, c);
450
+ return __int_as_float(ret);
451
+ }
452
+
453
+ // 64-bit SHFL
454
+ __SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width) {
455
+ int lo, hi;
456
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
457
+ hi = __shfl_sync(mask, hi, srcLane, width);
458
+ lo = __shfl_sync(mask, lo, srcLane, width);
459
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
460
+ return var;
461
+ }
462
+
463
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width) {
464
+ return (unsigned long long) __shfl_sync(mask, (long long) var, srcLane, width);
465
+ }
466
+
467
+ __SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width) {
468
+ int lo, hi;
469
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
470
+ hi = __shfl_up_sync(mask, hi, delta, width);
471
+ lo = __shfl_up_sync(mask, lo, delta, width);
472
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
473
+ return var;
474
+ }
475
+
476
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) {
477
+ return (unsigned long long) __shfl_up_sync(mask, (long long) var, delta, width);
478
+ }
479
+
480
+ __SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width) {
481
+ int lo, hi;
482
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
483
+ hi = __shfl_down_sync(mask, hi, delta, width);
484
+ lo = __shfl_down_sync(mask, lo, delta, width);
485
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
486
+ return var;
487
+ }
488
+
489
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) {
490
+ return (unsigned long long) __shfl_down_sync(mask, (long long) var, delta, width);
491
+ }
492
+
493
+ __SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width) {
494
+ int lo, hi;
495
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
496
+ hi = __shfl_xor_sync(mask, hi, laneMask, width);
497
+ lo = __shfl_xor_sync(mask, lo, laneMask, width);
498
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
499
+ return var;
500
+ }
501
+
502
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width) {
503
+ return (unsigned long long) __shfl_xor_sync(mask, (long long) var, laneMask, width);
504
+ }
505
+
506
+ __SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width) {
507
+ unsigned lo, hi;
508
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
509
+ hi = __shfl_sync(mask, hi, srcLane, width);
510
+ lo = __shfl_sync(mask, lo, srcLane, width);
511
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
512
+ return var;
513
+ }
514
+
515
+ __SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width) {
516
+ unsigned lo, hi;
517
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
518
+ hi = __shfl_up_sync(mask, hi, delta, width);
519
+ lo = __shfl_up_sync(mask, lo, delta, width);
520
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
521
+ return var;
522
+ }
523
+
524
+ __SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width) {
525
+ unsigned lo, hi;
526
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
527
+ hi = __shfl_down_sync(mask, hi, delta, width);
528
+ lo = __shfl_down_sync(mask, lo, delta, width);
529
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
530
+ return var;
531
+ }
532
+
533
+ __SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width) {
534
+ unsigned lo, hi;
535
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
536
+ hi = __shfl_xor_sync(mask, hi, laneMask, width);
537
+ lo = __shfl_xor_sync(mask, lo, laneMask, width);
538
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
539
+ return var;
540
+ }
541
+
542
+ // long needs some help to choose between 32-bits and 64-bits
543
+
544
+ __SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width) {
545
+ return (sizeof(long) == sizeof(long long)) ?
546
+ __shfl_sync(mask, (long long) var, srcLane, width) :
547
+ __shfl_sync(mask, (int) var, srcLane, width);
548
+ }
549
+
550
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width) {
551
+ return (sizeof(long) == sizeof(long long)) ?
552
+ __shfl_sync(mask, (unsigned long long) var, srcLane, width) :
553
+ __shfl_sync(mask, (unsigned int) var, srcLane, width);
554
+ }
555
+
556
+ __SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width) {
557
+ return (sizeof(long) == sizeof(long long)) ?
558
+ __shfl_up_sync(mask, (long long) var, delta, width) :
559
+ __shfl_up_sync(mask, (int) var, delta, width);
560
+ }
561
+
562
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width) {
563
+ return (sizeof(long) == sizeof(long long)) ?
564
+ __shfl_up_sync(mask, (unsigned long long) var, delta, width) :
565
+ __shfl_up_sync(mask, (unsigned int) var, delta, width);
566
+ }
567
+
568
+ __SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width) {
569
+ return (sizeof(long) == sizeof(long long)) ?
570
+ __shfl_down_sync(mask, (long long) var, delta, width) :
571
+ __shfl_down_sync(mask, (int) var, delta, width);
572
+ }
573
+
574
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width) {
575
+ return (sizeof(long) == sizeof(long long)) ?
576
+ __shfl_down_sync(mask, (unsigned long long) var, delta, width) :
577
+ __shfl_down_sync(mask, (unsigned int) var, delta, width);
578
+ }
579
+
580
+ __SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width) {
581
+ return (sizeof(long) == sizeof(long long)) ?
582
+ __shfl_xor_sync(mask, (long long) var, laneMask, width) :
583
+ __shfl_xor_sync(mask, (int) var, laneMask, width);
584
+ }
585
+
586
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width) {
587
+ return (sizeof(long) == sizeof(long long)) ?
588
+ __shfl_xor_sync(mask, (unsigned long long) var, laneMask, width) :
589
+ __shfl_xor_sync(mask, (unsigned int) var, laneMask, width);
590
+ }
591
+
592
+ #if defined(__local_warpSize)
593
+ #undef warpSize
594
+ #undef __local_warpSize
595
+ #endif
596
+
597
+ #endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */
598
+
599
+ #endif /* __cplusplus && __CUDACC__ */
600
+
601
+ #undef __SM_30_INTRINSICS_DECL__
602
+
603
+ #endif /* !__SM_30_INTRINSICS_HPP__ */
604
+
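As a point of reference, here is a minimal usage sketch (editorial, not part of the header above) showing how the __shfl_down_sync overloads defined in sm_30_intrinsics.hpp are typically combined into a warp-wide reduction. The full mask 0xffffffffu assumes all 32 lanes participate, the explicit width of 32 matches the declarations above, and the function name is illustrative only.

    // Illustrative helper (not from the header): warp-wide sum reduction.
    __device__ int warp_reduce_sum(int val) {
        // Halve the lane offset each step; after five steps lane 0 holds
        // the sum contributed by all 32 lanes of the warp.
        for (int offset = 16; offset > 0; offset >>= 1) {
            val += __shfl_down_sync(0xffffffffu, val, offset, 32);
        }
        return val;
    }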
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp ADDED
@@ -0,0 +1,588 @@
1
+ /*
2
+ * Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_32_INTRINSICS_HPP__)
51
+ #define __SM_32_INTRINSICS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_32_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_32_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ // In here are intrinsics which are built in to the compiler. These may be
72
+ // referenced by intrinsic implementations from this file.
73
+ extern "C"
74
+ {
75
+ // There are no intrinsics built in to the compiler for SM-3.5;
76
+ // all intrinsics are now implemented as inline PTX below.
77
+ }
78
+
79
+ /*******************************************************************************
80
+ * *
81
+ * Below are implementations of SM-3.5 intrinsics which are included as *
82
+ * source (instead of being built in to the compiler) *
83
+ * *
84
+ *******************************************************************************/
85
+
86
+ // LDG is a "load from global via texture path" command which can exhibit higher
87
+ // bandwidth on GK110 than a regular LD.
88
+ // Define a different pointer storage size for 64 and 32 bit
89
+ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
90
+ #define __LDG_PTR "l"
91
+ #else
92
+ #define __LDG_PTR "r"
93
+ #endif
94
+
95
+ /******************************************************************************
96
+ * __ldg *
97
+ ******************************************************************************/
98
+
99
+ // Size of long is architecture and OS specific.
100
+ #if defined(__LP64__) // 64 bits
101
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
102
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
103
+ #else // 32 bits
104
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
105
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
106
+ #endif
107
+
108
+
109
+ __SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
110
+ __SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
111
+ __SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
112
+ __SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
113
+ __SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
114
+ __SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.nc.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
115
+ __SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.nc.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
116
+ __SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.nc.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
117
+ __SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.nc.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
118
+ __SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.nc.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
119
+ __SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.nc.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
120
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.nc.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
121
+
122
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
123
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
124
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
125
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
126
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.nc.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
127
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.nc.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
128
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.nc.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
129
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.nc.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
130
+ __SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.nc.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
131
+ __SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
132
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
133
+
134
+ __SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) { float ret; asm volatile ("ld.global.nc.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
135
+ __SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) { double ret; asm volatile ("ld.global.nc.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
136
+ __SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.nc.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
137
+ __SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.nc.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
138
+ __SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.nc.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
139
+
140
+
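A brief usage sketch (illustrative only, not part of the header): __ldg loads through the read-only/non-coherent data path, so the memory it reads must not be written by any thread while the kernel is running. Assuming a simple element-wise kernel (the name and parameters are hypothetical):

    // Illustrative kernel: read-only input is loaded with __ldg.
    __global__ void scale(float *out, const float *in, float alpha, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) {
            out[i] = alpha * __ldg(&in[i]);  // cached, read-only load of in[i]
        }
    }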
141
+ /******************************************************************************
142
+ * __ldcg *
143
+ ******************************************************************************/
144
+
145
+ // Size of long is architecture and OS specific.
146
+ #if defined(__LP64__) // 64 bits
147
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
148
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
149
+ #else // 32 bits
150
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
151
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
152
+ #endif
153
+
154
+
155
+ __SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
156
+ __SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
157
+ __SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
158
+ __SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
159
+ __SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
160
+ __SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cg.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
161
+ __SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cg.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
162
+ __SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cg.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
163
+ __SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cg.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
164
+ __SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cg.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
165
+ __SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cg.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
166
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cg.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
167
+
168
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
169
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
170
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
171
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
172
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cg.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
173
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cg.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
174
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cg.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
175
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cg.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
176
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cg.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
177
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
178
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cg.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
179
+
180
+ __SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) { float ret; asm volatile ("ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
181
+ __SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) { double ret; asm volatile ("ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
182
+ __SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cg.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
183
+ __SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cg.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
184
+ __SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cg.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
185
+
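// Editorial note: the __ld* families in this header map one-to-one onto the
// PTX ld cache operators: .ca (__ldca) caches at all levels and is the
// default behavior, .cg (__ldcg) caches at L2 and below (bypassing L1),
// .cs (__ldcs) is an evict-first streaming hint, .lu (__ldlu) marks a
// last-use load, and .cv (__ldcv) treats the location as volatile and
// refetches it on every access.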
186
+ /******************************************************************************
187
+ * __ldca *
188
+ ******************************************************************************/
189
+
190
+ // Size of long is architecture and OS specific.
191
+ #if defined(__LP64__) // 64 bits
192
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
193
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
194
+ #else // 32 bits
195
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
196
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
197
+ #endif
198
+
199
+
200
+ __SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
201
+ __SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
202
+ __SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
203
+ __SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
204
+ __SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
205
+ __SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.ca.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
206
+ __SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.ca.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
207
+ __SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) { short2 ret; asm volatile ("ld.global.ca.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
208
+ __SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) { short4 ret; asm volatile ("ld.global.ca.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
209
+ __SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) { int2 ret; asm volatile ("ld.global.ca.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
210
+ __SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) { int4 ret; asm volatile ("ld.global.ca.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
211
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.ca.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
212
+
213
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
214
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
215
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
216
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
217
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.ca.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
218
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.ca.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
219
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.ca.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
220
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.ca.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
221
+ __SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.ca.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
222
+ __SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.ca.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
223
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.ca.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
224
+
225
+ __SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) { float ret; asm volatile ("ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
226
+ __SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) { double ret; asm volatile ("ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
227
+ __SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) { float2 ret; asm volatile ("ld.global.ca.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
228
+ __SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) { float4 ret; asm volatile ("ld.global.ca.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
229
+ __SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) { double2 ret; asm volatile ("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
230
+
231
+ /******************************************************************************
232
+ * __ldcs *
233
+ ******************************************************************************/
234
+
235
+ // Size of long is architecture and OS specific.
236
+ #if defined(__LP64__) // 64 bits
237
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
238
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
239
+ #else // 32 bits
240
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
241
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
242
+ #endif
243
+
244
+
245
+ __SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
246
+ __SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
247
+ __SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
248
+ __SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
249
+ __SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
250
+ __SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cs.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
251
+ __SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cs.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
252
+ __SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cs.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
253
+ __SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cs.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
254
+ __SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cs.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
255
+ __SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cs.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
256
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cs.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
257
+
258
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
259
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
260
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
261
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
262
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cs.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
263
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cs.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
264
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cs.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
265
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cs.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
266
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cs.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
267
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cs.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
268
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cs.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
269
+
270
+ __SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) { float ret; asm volatile ("ld.global.cs.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
271
+ __SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) { double ret; asm volatile ("ld.global.cs.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
272
+ __SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cs.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
273
+ __SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cs.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
274
+ __SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
275
+
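A short streaming sketch (illustrative; it assumes the destination is written with ordinary global stores, and the kernel name is hypothetical): __ldcs hints that each element is read only once, so the cached lines can be evicted early instead of displacing reusable data.

    // Illustrative kernel: stream data that will not be re-read.
    __global__ void stream_copy(float4 *dst, const float4 *src, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) {
            dst[i] = __ldcs(&src[i]);  // evict-first, 16-byte vector load
        }
    }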
276
+ /******************************************************************************
277
+ * __ldlu *
278
+ ******************************************************************************/
279
+
280
+ // Size of long is architecture and OS specific.
281
+ #if defined(__LP64__) // 64 bits
282
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
283
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
284
+ #else // 32 bits
285
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
286
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
287
+ #endif
288
+
289
+
290
+ __SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
291
+ __SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
292
+ __SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) { unsigned short ret; asm ("ld.global.lu.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
293
+ __SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) { unsigned int ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
294
+ __SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) { unsigned long long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
295
+ __SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.lu.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
296
+ __SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.lu.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
297
+ __SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) { short2 ret; asm ("ld.global.lu.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
298
+ __SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) { short4 ret; asm ("ld.global.lu.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
299
+ __SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) { int2 ret; asm ("ld.global.lu.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
300
+ __SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) { int4 ret; asm ("ld.global.lu.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
301
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.lu.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
302
+
303
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.lu.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
304
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.lu.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
305
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
306
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
307
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.lu.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
308
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.lu.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
309
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.lu.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
310
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.lu.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
311
+ __SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) { uint2 ret; asm ("ld.global.lu.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
312
+ __SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) { uint4 ret; asm ("ld.global.lu.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
313
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.lu.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
314
+
315
+ __SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) { float ret; asm ("ld.global.lu.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
316
+ __SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) { double ret; asm ("ld.global.lu.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
317
+ __SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) { float2 ret; asm ("ld.global.lu.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
318
+ __SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) { float4 ret; asm ("ld.global.lu.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
319
+ __SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) { double2 ret; asm ("ld.global.lu.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
320
+
321
+ /******************************************************************************
322
+ * __ldcv *
323
+ ******************************************************************************/
324
+
325
+ // Size of long is architecture and OS specific.
326
+ #if defined(__LP64__) // 64 bits
327
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
328
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
329
+ #else // 32 bits
330
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
331
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
332
+ #endif
333
+
334
+
335
+ __SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
336
+ __SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
337
+ __SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) { unsigned short ret; asm ("ld.global.cv.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
338
+ __SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) { unsigned int ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
339
+ __SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) { unsigned long long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
340
+ __SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.cv.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
341
+ __SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.cv.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
342
+ __SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) { short2 ret; asm ("ld.global.cv.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
343
+ __SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) { short4 ret; asm ("ld.global.cv.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
344
+ __SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) { int2 ret; asm ("ld.global.cv.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
345
+ __SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) { int4 ret; asm ("ld.global.cv.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
346
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.cv.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
347
+
348
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.cv.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
349
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.cv.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
350
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
351
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
352
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.cv.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
353
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.cv.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
354
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.cv.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
355
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.cv.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
356
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) { uint2 ret; asm ("ld.global.cv.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
357
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) { uint4 ret; asm ("ld.global.cv.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
358
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.cv.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
359
+
360
+ __SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) { float ret; asm ("ld.global.cv.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
361
+ __SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) { double ret; asm ("ld.global.cv.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
362
+ __SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) { float2 ret; asm ("ld.global.cv.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
363
+ __SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) { float4 ret; asm ("ld.global.cv.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
364
+ __SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) { double2 ret; asm ("ld.global.cv.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
365
+
366
+ /******************************************************************************
367
+ * __stwb *
368
+ ******************************************************************************/
369
+
370
+ // Size of long is architecture and OS specific.
371
+ #if defined(__LP64__) // 64 bits
372
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
373
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
374
+ #else // 32 bits
375
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
376
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
377
+ #endif
378
+
379
+
380
+ __SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
381
+ __SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
382
+ __SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) { asm ("st.global.wb.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
383
+ __SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
384
+ __SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
385
+ __SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
386
+ __SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
387
+ __SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) { asm ("st.global.wb.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
388
+ __SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) { asm ("st.global.wb.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
389
+ __SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) { asm ("st.global.wb.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
390
+ __SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) { asm ("st.global.wb.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
391
+ __SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) { asm ("st.global.wb.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
392
+
393
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) { asm ("st.global.wb.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
394
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) { asm ("st.global.wb.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
395
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
396
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
397
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
398
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
399
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) { asm ("st.global.wb.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
400
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) { asm ("st.global.wb.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
401
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) { asm ("st.global.wb.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
402
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) { asm ("st.global.wb.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
403
+ __SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wb.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
404
+
405
+ __SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) { asm ("st.global.wb.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
406
+ __SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) { asm ("st.global.wb.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
407
+ __SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) { asm ("st.global.wb.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
408
+ __SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) { asm ("st.global.wb.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
409
+ __SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) { asm ("st.global.wb.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
410
+
411
+ /******************************************************************************
412
+ * __stcg *
413
+ ******************************************************************************/
414
+
415
+ // Size of long is architecture and OS specific.
416
+ #if defined(__LP64__) // 64 bits
417
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
418
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
419
+ #else // 32 bits
420
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
421
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
422
+ #endif
423
+
424
+
425
+ __SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
426
+ __SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
427
+ __SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) { asm ("st.global.cg.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
428
+ __SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
429
+ __SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
430
+ __SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
431
+ __SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
432
+ __SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) { asm ("st.global.cg.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
433
+ __SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) { asm ("st.global.cg.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
434
+ __SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) { asm ("st.global.cg.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
435
+ __SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) { asm ("st.global.cg.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
436
+ __SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) { asm ("st.global.cg.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
437
+
438
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) { asm ("st.global.cg.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
439
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) { asm ("st.global.cg.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
440
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
441
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
442
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
443
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
444
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) { asm ("st.global.cg.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
445
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) { asm ("st.global.cg.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
446
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) { asm ("st.global.cg.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
447
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) { asm ("st.global.cg.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
448
+ __SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cg.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
449
+
450
+ __SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) { asm ("st.global.cg.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
451
+ __SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) { asm ("st.global.cg.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
452
+ __SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) { asm ("st.global.cg.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
453
+ __SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) { asm ("st.global.cg.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
454
+ __SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) { asm ("st.global.cg.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
455
+
456
+ /******************************************************************************
457
+ * __stcs *
458
+ ******************************************************************************/
459
+
460
+ // Size of long is architecture and OS specific.
461
+ #if defined(__LP64__) // 64 bits
462
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
463
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
464
+ #else // 32 bits
465
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
466
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
467
+ #endif
468
+
469
+
470
+ __SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
471
+ __SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
472
+ __SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) { asm ("st.global.cs.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
473
+ __SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
474
+ __SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
475
+ __SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
476
+ __SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
477
+ __SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) { asm ("st.global.cs.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
478
+ __SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) { asm ("st.global.cs.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
479
+ __SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) { asm ("st.global.cs.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
480
+ __SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) { asm ("st.global.cs.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
481
+ __SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) { asm ("st.global.cs.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
482
+
483
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) { asm ("st.global.cs.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
484
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) { asm ("st.global.cs.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
485
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
486
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
487
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
488
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
489
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) { asm ("st.global.cs.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
490
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) { asm ("st.global.cs.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
491
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) { asm ("st.global.cs.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
492
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) { asm ("st.global.cs.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
493
+ __SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cs.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
494
+
495
+ __SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) { asm ("st.global.cs.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
496
+ __SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) { asm ("st.global.cs.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
497
+ __SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) { asm ("st.global.cs.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
498
+ __SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) { asm ("st.global.cs.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
499
+ __SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) { asm ("st.global.cs.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
500
+
501
+ /******************************************************************************
502
+ * __stwt *
503
+ ******************************************************************************/
504
+
505
+ // Size of long is architecture and OS specific.
506
+ #if defined(__LP64__) // 64 bits
507
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
508
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
509
+ #else // 32 bits
510
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
511
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
512
+ #endif
513
+
514
+
515
+ __SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
516
+ __SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
517
+ __SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) { asm ("st.global.wt.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
518
+ __SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
519
+ __SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
520
+ __SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
521
+ __SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
522
+ __SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) { asm ("st.global.wt.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
523
+ __SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) { asm ("st.global.wt.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
524
+ __SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) { asm ("st.global.wt.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
525
+ __SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) { asm ("st.global.wt.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
526
+ __SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) { asm ("st.global.wt.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
527
+
528
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) { asm ("st.global.wt.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
529
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) { asm ("st.global.wt.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
530
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
531
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
532
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
533
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
534
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) { asm ("st.global.wt.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
535
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) { asm ("st.global.wt.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
536
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) { asm ("st.global.wt.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
537
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) { asm ("st.global.wt.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
538
+ __SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wt.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
539
+
540
+ __SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) { asm ("st.global.wt.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
541
+ __SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) { asm ("st.global.wt.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
542
+ __SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) { asm ("st.global.wt.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
543
+ __SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) { asm ("st.global.wt.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
544
+ __SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) { asm ("st.global.wt.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
545
+
546
+ #undef __LDG_PTR
547
+
548
+
549
+ // SHF is the "funnel shift" operation - an accelerated left/right shift with carry
550
+ // operating on 64-bit quantities, which are concatenations of two 32-bit registers.
551
+
552
+ // This shifts the 64-bit value [hi:lo] left by "shift" bits, returning the most significant 32 bits of the result.
553
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift)
554
+ {
555
+ unsigned int ret;
556
+ asm volatile ("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
557
+ return ret;
558
+ }
559
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift)
560
+ {
561
+ unsigned int ret;
562
+ asm volatile ("shf.l.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
563
+ return ret;
564
+ }
565
+
566
+ // This shifts the 64-bit value [hi:lo] right by "shift" bits, returning the least significant 32 bits of the result.
567
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift)
568
+ {
569
+ unsigned int ret;
570
+ asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
571
+ return ret;
572
+ }
573
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift)
574
+ {
575
+ unsigned int ret;
576
+ asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
577
+ return ret;
578
+ }
579
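The funnel-shift intrinsics above turn a 32-bit rotate into a single shf instruction: concatenate a word with itself and take the high half of the shifted 64-bit value. A minimal sketch, assuming a hypothetical rotl32 helper and rotl32_kernel that are not part of this header:

// shf.l.wrap already reduces the shift amount modulo 32, so n needs no extra masking.
__device__ unsigned int rotl32(unsigned int x, unsigned int n)
{
    // returns the high 32 bits of the 64-bit value (x:x) shifted left by (n & 31),
    // which is exactly x rotated left by n
    return __funnelshift_l(x, x, n);
}

__global__ void rotl32_kernel(unsigned int *data, unsigned int n)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    data[i] = rotl32(data[i], n);  // assumes the launch covers the array exactly
}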
+
580
+
581
+ #endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
582
+
583
+ #endif /* __cplusplus && __CUDACC__ */
584
+
585
+ #undef __SM_32_INTRINSICS_DECL__
586
+
587
+ #endif /* !__SM_32_INTRINSICS_HPP__ */
588
+
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h ADDED
@@ -0,0 +1,543 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_60_ATOMIC_FUNCTIONS_H__)
51
+ #define __SM_60_ATOMIC_FUNCTIONS_H__
52
+
53
+
54
+ #if defined(__CUDACC_RTC__)
55
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__
56
+ #elif defined(_NVHPC_CUDA)
57
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__
58
+ #else /* __CUDACC_RTC__ */
59
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
60
+ #endif /* __CUDACC_RTC__ */
61
+
62
+ #if defined(__cplusplus) && defined(__CUDACC__)
63
+
64
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
65
+
66
+ /*******************************************************************************
67
+ * *
68
+ * *
69
+ * *
70
+ *******************************************************************************/
71
+
72
+ #include "cuda_runtime_api.h"
73
+
74
+ /* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA
75
+ * C++ compiler where the macro __CUDA_ARCH__ is not defined. */
76
+ #if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
77
+ #define __DEF_IF_HOST { }
78
+ #else /* !__CUDA_ARCH__ */
79
+ #define __DEF_IF_HOST ;
80
+ #endif /* __CUDA_ARCH__ */
81
+
82
+
83
+
84
+ #ifdef __CUDA_ARCH__
85
+ extern "C"
86
+ {
87
+ extern __device__ __device_builtin__ double __dAtomicAdd(double *address, double val);
88
+
89
+ extern __device__ __device_builtin__
90
+ int __iAtomicAdd_block(int *address, int val);
91
+
92
+ extern __device__ __device_builtin__
93
+ int __iAtomicAdd_system(int *address, int val);
94
+
95
+ extern __device__ __device_builtin__
96
+ unsigned int __uAtomicAdd_block(unsigned int *address, unsigned int val);
97
+
98
+ extern __device__ __device_builtin__
99
+ unsigned int __uAtomicAdd_system(unsigned int *address, unsigned int val);
100
+
101
+ extern __device__ __device_builtin__
102
+ unsigned long long __ullAtomicAdd_block(unsigned long long *address, unsigned long long val);
103
+
104
+ extern __device__ __device_builtin__
105
+ unsigned long long __ullAtomicAdd_system(unsigned long long *address, unsigned long long val);
106
+
107
+ extern __device__ __device_builtin__
108
+ float __fAtomicAdd_block(float *address, float val);
109
+
110
+ extern __device__ __device_builtin__
111
+ float __fAtomicAdd_system(float *address, float val);
112
+
113
+ extern __device__ __device_builtin__
114
+ double __dAtomicAdd_block(double *address, double val);
115
+
116
+ extern __device__ __device_builtin__
117
+ double __dAtomicAdd_system(double *address, double val);
118
+
119
+ extern __device__ __device_builtin__
120
+ int __iAtomicExch_block(int *address, int val);
121
+
122
+ extern __device__ __device_builtin__
123
+ int __iAtomicExch_system(int *address, int val);
124
+
125
+ extern __device__ __device_builtin__
126
+ unsigned int __uAtomicExch_block(unsigned int *address, unsigned int val);
127
+
128
+ extern __device__ __device_builtin__
129
+ unsigned int __uAtomicExch_system(unsigned int *address, unsigned int val);
130
+
131
+ extern __device__ __device_builtin__
132
+ unsigned long long __ullAtomicExch_block(unsigned long long *address, unsigned long long val);
133
+
134
+ extern __device__ __device_builtin__
135
+ unsigned long long __ullAtomicExch_system(unsigned long long *address, unsigned long long val);
136
+
137
+ extern __device__ __device_builtin__
138
+ float __fAtomicExch_block(float *address, float val);
139
+
140
+ extern __device__ __device_builtin__
141
+ float __fAtomicExch_system(float *address, float val);
142
+
143
+ extern __device__ __device_builtin__
144
+ int __iAtomicMin_block(int *address, int val);
145
+
146
+ extern __device__ __device_builtin__
147
+ int __iAtomicMin_system(int *address, int val);
148
+
149
+ extern __device__ __device_builtin__
150
+ long long __illAtomicMin_block(long long *address, long long val);
151
+
152
+ extern __device__ __device_builtin__
153
+ long long __illAtomicMin_system(long long *address, long long val);
154
+
155
+ extern __device__ __device_builtin__
156
+ unsigned int __uAtomicMin_block(unsigned int *address, unsigned int val);
157
+
158
+ extern __device__ __device_builtin__
159
+ unsigned int __uAtomicMin_system(unsigned int *address, unsigned int val);
160
+
161
+ extern __device__ __device_builtin__
162
+ unsigned long long __ullAtomicMin_block(unsigned long long *address, unsigned long long val);
163
+
164
+ extern __device__ __device_builtin__
165
+ unsigned long long __ullAtomicMin_system(unsigned long long *address, unsigned long long val);
166
+
167
+ extern __device__ __device_builtin__
168
+ int __iAtomicMax_block(int *address, int val);
169
+
170
+ extern __device__ __device_builtin__
171
+ int __iAtomicMax_system(int *address, int val);
172
+
173
+ extern __device__ __device_builtin__
174
+ long long __illAtomicMax_block(long long *address, long long val);
175
+
176
+ extern __device__ __device_builtin__
177
+ long long __illAtomicMax_system(long long *address, long long val);
178
+
179
+ extern __device__ __device_builtin__
180
+ unsigned int __uAtomicMax_block(unsigned int *address, unsigned int val);
181
+
182
+ extern __device__ __device_builtin__
183
+ unsigned int __uAtomicMax_system(unsigned int *address, unsigned int val);
184
+
185
+ extern __device__ __device_builtin__
186
+ unsigned long long __ullAtomicMax_block(unsigned long long *address, unsigned long long val);
187
+
188
+ extern __device__ __device_builtin__
189
+ unsigned long long __ullAtomicMax_system(unsigned long long *address, unsigned long long val);
190
+
191
+ extern __device__ __device_builtin__
192
+ unsigned int __uAtomicInc_block(unsigned int *address, unsigned int val);
193
+
194
+ extern __device__ __device_builtin__
195
+ unsigned int __uAtomicInc_system(unsigned int *address, unsigned int val);
196
+
197
+ extern __device__ __device_builtin__
198
+ unsigned int __uAtomicDec_block(unsigned int *address, unsigned int val);
199
+
200
+ extern __device__ __device_builtin__
201
+ unsigned int __uAtomicDec_system(unsigned int *address, unsigned int val);
202
+
203
+ extern __device__ __device_builtin__
204
+ int __iAtomicCAS_block(int *address, int compare, int val);
205
+
206
+ extern __device__ __device_builtin__
207
+ int __iAtomicCAS_system(int *address, int compare, int val);
208
+
209
+ extern __device__ __device_builtin__
210
+ unsigned int __uAtomicCAS_block(unsigned int *address, unsigned int compare,
211
+ unsigned int val);
212
+
213
+ extern __device__ __device_builtin__
214
+ unsigned int __uAtomicCAS_system(unsigned int *address, unsigned int compare,
215
+ unsigned int val);
216
+
217
+ extern __device__ __device_builtin__
218
+ unsigned long long __ullAtomicCAS_block(unsigned long long int *address,
219
+ unsigned long long int compare,
220
+ unsigned long long int val);
221
+
222
+ extern __device__ __device_builtin__
223
+ unsigned long long __ullAtomicCAS_system(unsigned long long int *address,
224
+ unsigned long long int compare,
225
+ unsigned long long int val);
226
+
227
+ extern __device__ __device_builtin__
228
+ int __iAtomicAnd_block(int *address, int val);
229
+
230
+ extern __device__ __device_builtin__
231
+ int __iAtomicAnd_system(int *address, int val);
232
+
233
+ extern __device__ __device_builtin__
234
+ long long __llAtomicAnd_block(long long *address, long long val);
235
+
236
+ extern __device__ __device_builtin__
237
+ long long __llAtomicAnd_system(long long *address, long long val);
238
+
239
+ extern __device__ __device_builtin__
240
+ unsigned int __uAtomicAnd_block(unsigned int *address, unsigned int val);
241
+
242
+ extern __device__ __device_builtin__
243
+ unsigned int __uAtomicAnd_system(unsigned int *address, unsigned int val);
244
+
245
+ extern __device__ __device_builtin__
246
+ unsigned long long __ullAtomicAnd_block(unsigned long long *address, unsigned long long val);
247
+
248
+ extern __device__ __device_builtin__
249
+ unsigned long long __ullAtomicAnd_system(unsigned long long *address, unsigned long long val);
250
+
251
+ extern __device__ __device_builtin__
252
+ int __iAtomicOr_block(int *address, int val);
253
+
254
+ extern __device__ __device_builtin__
255
+ int __iAtomicOr_system(int *address, int val);
256
+
257
+ extern __device__ __device_builtin__
258
+ long long __llAtomicOr_block(long long *address, long long val);
259
+
260
+ extern __device__ __device_builtin__
261
+ long long __llAtomicOr_system(long long *address, long long val);
262
+
263
+ extern __device__ __device_builtin__
264
+ unsigned int __uAtomicOr_block(unsigned int *address, unsigned int val);
265
+
266
+ extern __device__ __device_builtin__
267
+ unsigned int __uAtomicOr_system(unsigned int *address, unsigned int val);
268
+
269
+ extern __device__ __device_builtin__
270
+ unsigned long long __ullAtomicOr_block(unsigned long long *address, unsigned long long val);
271
+
272
+ extern __device__ __device_builtin__
273
+ unsigned long long __ullAtomicOr_system(unsigned long long *address, unsigned long long val);
274
+
275
+ extern __device__ __device_builtin__
276
+ int __iAtomicXor_block(int *address, int val);
277
+
278
+ extern __device__ __device_builtin__
279
+ int __iAtomicXor_system(int *address, int val);
280
+
281
+ extern __device__ __device_builtin__
282
+ long long __llAtomicXor_block(long long *address, long long val);
283
+
284
+ extern __device__ __device_builtin__
285
+ long long __llAtomicXor_system(long long *address, long long val);
286
+
287
+ extern __device__ __device_builtin__
288
+ unsigned int __uAtomicXor_block(unsigned int *address, unsigned int val);
289
+
290
+ extern __device__ __device_builtin__
291
+ unsigned int __uAtomicXor_system(unsigned int *address, unsigned int val);
292
+
293
+ extern __device__ __device_builtin__
294
+ unsigned long long __ullAtomicXor_block(unsigned long long *address, unsigned long long val);
295
+
296
+ extern __device__ __device_builtin__
297
+ unsigned long long __ullAtomicXor_system(unsigned long long *address, unsigned long long val);
298
+ }
299
+ #endif /* __CUDA_ARCH__ */
300
+
301
+ /*******************************************************************************
302
+ * *
303
+ * *
304
+ * *
305
+ *******************************************************************************/
306
+
307
+ __SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) __DEF_IF_HOST
308
+
309
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
310
+ int atomicAdd_block(int *address, int val) __DEF_IF_HOST
311
+
312
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
313
+ int atomicAdd_system(int *address, int val) __DEF_IF_HOST
314
+
315
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
316
+ unsigned int atomicAdd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
317
+
318
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
319
+ unsigned int atomicAdd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
320
+
321
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
322
+ unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
323
+
324
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
325
+ unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
326
+
327
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
328
+ float atomicAdd_block(float *address, float val) __DEF_IF_HOST
329
+
330
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
331
+ float atomicAdd_system(float *address, float val) __DEF_IF_HOST
332
+
333
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
334
+ double atomicAdd_block(double *address, double val) __DEF_IF_HOST
335
+
336
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
337
+ double atomicAdd_system(double *address, double val) __DEF_IF_HOST
338
+
339
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
340
+ int atomicSub_block(int *address, int val) __DEF_IF_HOST
341
+
342
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
343
+ int atomicSub_system(int *address, int val) __DEF_IF_HOST
344
+
345
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
346
+ unsigned int atomicSub_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
347
+
348
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
349
+ unsigned int atomicSub_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
350
+
351
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
352
+ int atomicExch_block(int *address, int val) __DEF_IF_HOST
353
+
354
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
355
+ int atomicExch_system(int *address, int val) __DEF_IF_HOST
356
+
357
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
358
+ unsigned int atomicExch_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
359
+
360
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
361
+ unsigned int atomicExch_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
362
+
363
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
364
+ unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
365
+
366
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
367
+ unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
368
+
369
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
370
+ float atomicExch_block(float *address, float val) __DEF_IF_HOST
371
+
372
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
373
+ float atomicExch_system(float *address, float val) __DEF_IF_HOST
374
+
375
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
376
+ int atomicMin_block(int *address, int val) __DEF_IF_HOST
377
+
378
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
379
+ int atomicMin_system(int *address, int val) __DEF_IF_HOST
380
+
381
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
382
+ long long atomicMin_block(long long *address, long long val) __DEF_IF_HOST
383
+
384
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
385
+ long long atomicMin_system(long long *address, long long val) __DEF_IF_HOST
386
+
387
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
388
+ unsigned int atomicMin_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
389
+
390
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
391
+ unsigned int atomicMin_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
392
+
393
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
394
+ unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
395
+
396
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
397
+ unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
398
+
399
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
400
+ int atomicMax_block(int *address, int val) __DEF_IF_HOST
401
+
402
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
403
+ int atomicMax_system(int *address, int val) __DEF_IF_HOST
404
+
405
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
406
+ long long atomicMax_block(long long *address, long long val) __DEF_IF_HOST
407
+
408
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
409
+ long long atomicMax_system(long long *address, long long val) __DEF_IF_HOST
410
+
411
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
412
+ unsigned int atomicMax_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
413
+
414
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
415
+ unsigned int atomicMax_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
416
+
417
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
418
+ unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
419
+
420
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
421
+ unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
422
+
423
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
424
+ unsigned int atomicInc_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
425
+
426
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
427
+ unsigned int atomicInc_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
428
+
429
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
430
+ unsigned int atomicDec_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
431
+
432
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
433
+ unsigned int atomicDec_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
434
+
435
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
436
+ int atomicCAS_block(int *address, int compare, int val) __DEF_IF_HOST
437
+
438
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
439
+ int atomicCAS_system(int *address, int compare, int val) __DEF_IF_HOST
440
+
441
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
442
+ unsigned int atomicCAS_block(unsigned int *address, unsigned int compare,
443
+ unsigned int val) __DEF_IF_HOST
444
+
445
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
446
+ unsigned int atomicCAS_system(unsigned int *address, unsigned int compare,
447
+ unsigned int val) __DEF_IF_HOST
448
+
449
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
450
+ unsigned long long int atomicCAS_block(unsigned long long int *address,
451
+ unsigned long long int compare,
452
+ unsigned long long int val) __DEF_IF_HOST
453
+
454
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
455
+ unsigned long long int atomicCAS_system(unsigned long long int *address,
456
+ unsigned long long int compare,
457
+ unsigned long long int val) __DEF_IF_HOST
458
+
459
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
460
+ int atomicAnd_block(int *address, int val) __DEF_IF_HOST
461
+
462
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
463
+ int atomicAnd_system(int *address, int val) __DEF_IF_HOST
464
+
465
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
466
+ long long atomicAnd_block(long long *address, long long val) __DEF_IF_HOST
467
+
468
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
469
+ long long atomicAnd_system(long long *address, long long val) __DEF_IF_HOST
470
+
471
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
472
+ unsigned int atomicAnd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
473
+
474
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
475
+ unsigned int atomicAnd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
476
+
477
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
478
+ unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
479
+
480
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
481
+ unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
482
+
483
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
484
+ int atomicOr_block(int *address, int val) __DEF_IF_HOST
485
+
486
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
487
+ int atomicOr_system(int *address, int val) __DEF_IF_HOST
488
+
489
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
490
+ long long atomicOr_block(long long *address, long long val) __DEF_IF_HOST
491
+
492
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
493
+ long long atomicOr_system(long long *address, long long val) __DEF_IF_HOST
494
+
495
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
496
+ unsigned int atomicOr_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
497
+
498
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
499
+ unsigned int atomicOr_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
500
+
501
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
502
+ unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
503
+
504
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
505
+ unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
506
+
507
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
508
+ int atomicXor_block(int *address, int val) __DEF_IF_HOST
509
+
510
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
511
+ int atomicXor_system(int *address, int val) __DEF_IF_HOST
512
+
513
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
514
+ long long atomicXor_block(long long *address, long long val) __DEF_IF_HOST
515
+
516
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
517
+ long long atomicXor_system(long long *address, long long val) __DEF_IF_HOST
518
+
519
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
520
+ unsigned int atomicXor_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
521
+
522
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
523
+ unsigned int atomicXor_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
524
+
525
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
526
+ unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
527
+
528
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
529
+ unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
530
+
531
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
532
+
533
+ #endif /* __cplusplus && __CUDACC__ */
534
+
535
+ #undef __SM_60_ATOMIC_FUNCTIONS_DECL__
536
+ #undef __DEF_IF_HOST
537
+
538
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
539
+ #include "sm_60_atomic_functions.hpp"
540
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
541
+
542
+ #endif /* !__SM_60_ATOMIC_FUNCTIONS_H__ */
543
+
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp ADDED
@@ -0,0 +1,527 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_60_ATOMIC_FUNCTIONS_HPP__)
51
+ #define __SM_60_ATOMIC_FUNCTIONS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #else /* __CUDACC_RTC__ */
56
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ /*******************************************************************************
72
+ * *
73
+ * *
74
+ * *
75
+ *******************************************************************************/
76
+
77
+ __SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val)
78
+ {
79
+ return __dAtomicAdd(address, val);
80
+ }
81
+
82
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
83
+ int atomicAdd_block(int *address, int val)
84
+ {
85
+ return __iAtomicAdd_block(address, val);
86
+ }
87
+
88
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
89
+ int atomicAdd_system(int *address, int val)
90
+ {
91
+ return __iAtomicAdd_system(address, val);
92
+ }
93
+
94
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
95
+ unsigned int atomicAdd_block(unsigned int *address, unsigned int val)
96
+ {
97
+ return __uAtomicAdd_block(address, val);
98
+ }
99
+
100
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
101
+ unsigned int atomicAdd_system(unsigned int *address, unsigned int val)
102
+ {
103
+ return __uAtomicAdd_system(address, val);
104
+ }
105
+
106
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
107
+ unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val)
108
+ {
109
+ return __ullAtomicAdd_block(address, val);
110
+ }
111
+
112
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
113
+ unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val)
114
+ {
115
+ return __ullAtomicAdd_system(address, val);
116
+ }
117
+
118
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
119
+ float atomicAdd_block(float *address, float val)
120
+ {
121
+ return __fAtomicAdd_block(address, val);
122
+ }
123
+
124
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
125
+ float atomicAdd_system(float *address, float val)
126
+ {
127
+ return __fAtomicAdd_system(address, val);
128
+ }
129
+
130
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
131
+ double atomicAdd_block(double *address, double val)
132
+ {
133
+ return __dAtomicAdd_block(address, val);
134
+ }
135
+
136
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
137
+ double atomicAdd_system(double *address, double val)
138
+ {
139
+ return __dAtomicAdd_system(address, val);
140
+ }
141
+
142
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
143
+ int atomicSub_block(int *address, int val)
144
+ {
145
+ return __iAtomicAdd_block(address, (unsigned int)-(int)val);
146
+ }
147
+
148
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
149
+ int atomicSub_system(int *address, int val)
150
+ {
151
+ return __iAtomicAdd_system(address, (unsigned int)-(int)val);
152
+ }
153
+
154
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
155
+ unsigned int atomicSub_block(unsigned int *address, unsigned int val)
156
+ {
157
+ return __uAtomicAdd_block(address, (unsigned int)-(int)val);
158
+ }
159
+
160
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
161
+ unsigned int atomicSub_system(unsigned int *address, unsigned int val)
162
+ {
163
+ return __uAtomicAdd_system(address, (unsigned int)-(int)val);
164
+ }
165
+
166
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
167
+ int atomicExch_block(int *address, int val)
168
+ {
169
+ return __iAtomicExch_block(address, val);
170
+ }
171
+
172
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
173
+ int atomicExch_system(int *address, int val)
174
+ {
175
+ return __iAtomicExch_system(address, val);
176
+ }
177
+
178
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
179
+ unsigned int atomicExch_block(unsigned int *address, unsigned int val)
180
+ {
181
+ return __uAtomicExch_block(address, val);
182
+ }
183
+
184
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
185
+ unsigned int atomicExch_system(unsigned int *address, unsigned int val)
186
+ {
187
+ return __uAtomicExch_system(address, val);
188
+ }
189
+
190
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
191
+ unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val)
192
+ {
193
+ return __ullAtomicExch_block(address, val);
194
+ }
195
+
196
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
197
+ unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val)
198
+ {
199
+ return __ullAtomicExch_system(address, val);
200
+ }
201
+
202
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
203
+ float atomicExch_block(float *address, float val)
204
+ {
205
+ return __fAtomicExch_block(address, val);
206
+ }
207
+
208
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
209
+ float atomicExch_system(float *address, float val)
210
+ {
211
+ return __fAtomicExch_system(address, val);
212
+ }
213
+
214
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
215
+ int atomicMin_block(int *address, int val)
216
+ {
217
+ return __iAtomicMin_block(address, val);
218
+ }
219
+
220
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
221
+ int atomicMin_system(int *address, int val)
222
+ {
223
+ return __iAtomicMin_system(address, val);
224
+ }
225
+
226
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
227
+ long long atomicMin_block(long long *address, long long val)
228
+ {
229
+ return __illAtomicMin_block(address, val);
230
+ }
231
+
232
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
233
+ long long atomicMin_system(long long *address, long long val)
234
+ {
235
+ return __illAtomicMin_system(address, val);
236
+ }
237
+
238
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
239
+ unsigned int atomicMin_block(unsigned int *address, unsigned int val)
240
+ {
241
+ return __uAtomicMin_block(address, val);
242
+ }
243
+
244
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
245
+ unsigned int atomicMin_system(unsigned int *address, unsigned int val)
246
+ {
247
+ return __uAtomicMin_system(address, val);
248
+ }
249
+
250
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
251
+ unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val)
252
+ {
253
+ return __ullAtomicMin_block(address, val);
254
+ }
255
+
256
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
257
+ unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val)
258
+ {
259
+ return __ullAtomicMin_system(address, val);
260
+ }
261
+
262
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
263
+ int atomicMax_block(int *address, int val)
264
+ {
265
+ return __iAtomicMax_block(address, val);
266
+ }
267
+
268
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
269
+ int atomicMax_system(int *address, int val)
270
+ {
271
+ return __iAtomicMax_system(address, val);
272
+ }
273
+
274
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
275
+ long long atomicMax_block(long long *address, long long val)
276
+ {
277
+ return __illAtomicMax_block(address, val);
278
+ }
279
+
280
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
281
+ long long atomicMax_system(long long *address, long long val)
282
+ {
283
+ return __illAtomicMax_system(address, val);
284
+ }
285
+
286
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
287
+ unsigned int atomicMax_block(unsigned int *address, unsigned int val)
288
+ {
289
+ return __uAtomicMax_block(address, val);
290
+ }
291
+
292
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
293
+ unsigned int atomicMax_system(unsigned int *address, unsigned int val)
294
+ {
295
+ return __uAtomicMax_system(address, val);
296
+ }
297
+
298
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
299
+ unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val)
300
+ {
301
+ return __ullAtomicMax_block(address, val);
302
+ }
303
+
304
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
305
+ unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val)
306
+ {
307
+ return __ullAtomicMax_system(address, val);
308
+ }
309
+
310
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
311
+ unsigned int atomicInc_block(unsigned int *address, unsigned int val)
312
+ {
313
+ return __uAtomicInc_block(address, val);
314
+ }
315
+
316
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
317
+ unsigned int atomicInc_system(unsigned int *address, unsigned int val)
318
+ {
319
+ return __uAtomicInc_system(address, val);
320
+ }
321
+
322
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
323
+ unsigned int atomicDec_block(unsigned int *address, unsigned int val)
324
+ {
325
+ return __uAtomicDec_block(address, val);
326
+ }
327
+
328
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
329
+ unsigned int atomicDec_system(unsigned int *address, unsigned int val)
330
+ {
331
+ return __uAtomicDec_system(address, val);
332
+ }
333
+
334
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
335
+ int atomicCAS_block(int *address, int compare, int val)
336
+ {
337
+ return __iAtomicCAS_block(address, compare, val);
338
+ }
339
+
340
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
341
+ int atomicCAS_system(int *address, int compare, int val)
342
+ {
343
+ return __iAtomicCAS_system(address, compare, val);
344
+ }
345
+
346
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
347
+ unsigned int atomicCAS_block(unsigned int *address, unsigned int compare,
348
+ unsigned int val)
349
+ {
350
+ return __uAtomicCAS_block(address, compare, val);
351
+ }
352
+
353
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
354
+ unsigned int atomicCAS_system(unsigned int *address, unsigned int compare,
355
+ unsigned int val)
356
+ {
357
+ return __uAtomicCAS_system(address, compare, val);
358
+ }
359
+
360
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
361
+ unsigned long long int atomicCAS_block(unsigned long long int *address,
362
+ unsigned long long int compare,
363
+ unsigned long long int val)
364
+ {
365
+ return __ullAtomicCAS_block(address, compare, val);
366
+ }
367
+
368
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
369
+ unsigned long long int atomicCAS_system(unsigned long long int *address,
370
+ unsigned long long int compare,
371
+ unsigned long long int val)
372
+ {
373
+ return __ullAtomicCAS_system(address, compare, val);
374
+ }
375
+
376
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
377
+ int atomicAnd_block(int *address, int val)
378
+ {
379
+ return __iAtomicAnd_block(address, val);
380
+ }
381
+
382
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
383
+ int atomicAnd_system(int *address, int val)
384
+ {
385
+ return __iAtomicAnd_system(address, val);
386
+ }
387
+
388
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
389
+ long long atomicAnd_block(long long *address, long long val)
390
+ {
391
+ return __llAtomicAnd_block(address, val);
392
+ }
393
+
394
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
395
+ long long atomicAnd_system(long long *address, long long val)
396
+ {
397
+ return __llAtomicAnd_system(address, val);
398
+ }
399
+
400
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
401
+ unsigned int atomicAnd_block(unsigned int *address, unsigned int val)
402
+ {
403
+ return __uAtomicAnd_block(address, val);
404
+ }
405
+
406
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
407
+ unsigned int atomicAnd_system(unsigned int *address, unsigned int val)
408
+ {
409
+ return __uAtomicAnd_system(address, val);
410
+ }
411
+
412
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
413
+ unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val)
414
+ {
415
+ return __ullAtomicAnd_block(address, val);
416
+ }
417
+
418
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
419
+ unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val)
420
+ {
421
+ return __ullAtomicAnd_system(address, val);
422
+ }
423
+
424
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
425
+ int atomicOr_block(int *address, int val)
426
+ {
427
+ return __iAtomicOr_block(address, val);
428
+ }
429
+
430
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
431
+ int atomicOr_system(int *address, int val)
432
+ {
433
+ return __iAtomicOr_system(address, val);
434
+ }
435
+
436
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
437
+ long long atomicOr_block(long long *address, long long val)
438
+ {
439
+ return __llAtomicOr_block(address, val);
440
+ }
441
+
442
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
443
+ long long atomicOr_system(long long *address, long long val)
444
+ {
445
+ return __llAtomicOr_system(address, val);
446
+ }
447
+
448
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
449
+ unsigned int atomicOr_block(unsigned int *address, unsigned int val)
450
+ {
451
+ return __uAtomicOr_block(address, val);
452
+ }
453
+
454
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
455
+ unsigned int atomicOr_system(unsigned int *address, unsigned int val)
456
+ {
457
+ return __uAtomicOr_system(address, val);
458
+ }
459
+
460
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
461
+ unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val)
462
+ {
463
+ return __ullAtomicOr_block(address, val);
464
+ }
465
+
466
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
467
+ unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val)
468
+ {
469
+ return __ullAtomicOr_system(address, val);
470
+ }
471
+
472
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
473
+ int atomicXor_block(int *address, int val)
474
+ {
475
+ return __iAtomicXor_block(address, val);
476
+ }
477
+
478
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
479
+ int atomicXor_system(int *address, int val)
480
+ {
481
+ return __iAtomicXor_system(address, val);
482
+ }
483
+
484
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
485
+ long long atomicXor_block(long long *address, long long val)
486
+ {
487
+ return __llAtomicXor_block(address, val);
488
+ }
489
+
490
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
491
+ long long atomicXor_system(long long *address, long long val)
492
+ {
493
+ return __llAtomicXor_system(address, val);
494
+ }
495
+
496
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
497
+ unsigned int atomicXor_block(unsigned int *address, unsigned int val)
498
+ {
499
+ return __uAtomicXor_block(address, val);
500
+ }
501
+
502
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
503
+ unsigned int atomicXor_system(unsigned int *address, unsigned int val)
504
+ {
505
+ return __uAtomicXor_system(address, val);
506
+ }
507
+
508
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
509
+ unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val)
510
+ {
511
+ return __ullAtomicXor_block(address, val);
512
+ }
513
+
514
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
515
+ unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val)
516
+ {
517
+ return __ullAtomicXor_system(address, val);
518
+ }
519
+
520
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
521
+
522
+ #endif /* __cplusplus && __CUDACC__ */
523
+
524
+ #undef __SM_60_ATOMIC_FUNCTIONS_DECL__
525
+
526
+ #endif /* !__SM_60_ATOMIC_FUNCTIONS_HPP__ */
527
+
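
The header above layers the _block and _system scope suffixes (introduced with compute capability 6.0) on top of the default device-wide atomics. The sketch below is not part of the diff; it is a minimal usage illustration with hypothetical kernel names, assuming compilation with -arch=sm_60 or newer, contrasting the three scopes.

#include <cuda_runtime.h>

__global__ void block_counter(unsigned int *per_block, unsigned int *device_total)
{
    // Block scope: only threads of the same thread block observe this atomically.
    atomicAdd_block(&per_block[blockIdx.x], 1u);
    // Default device scope: visible to every block running on this GPU.
    atomicAdd(device_total, 1u);
}

__global__ void system_counter(unsigned int *host_visible_total)
{
    // System scope: also ordered with CPU or peer-GPU accesses, e.g. when the
    // counter lives in managed or host-pinned memory.
    atomicAdd_system(host_visible_total, 1u);
}
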
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h ADDED
@@ -0,0 +1,177 @@
1
+ /*
2
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__TEXTURE_TYPES_H__)
51
+ #define __TEXTURE_TYPES_H__
52
+
53
+ /*******************************************************************************
54
+ * *
55
+ * *
56
+ * *
57
+ *******************************************************************************/
58
+
59
+ #include "driver_types.h"
60
+
61
+ /**
62
+ * \addtogroup CUDART_TYPES
63
+ *
64
+ * @{
65
+ */
66
+
67
+ /*******************************************************************************
68
+ * *
69
+ * *
70
+ * *
71
+ *******************************************************************************/
72
+
73
+ #define cudaTextureType1D 0x01
74
+ #define cudaTextureType2D 0x02
75
+ #define cudaTextureType3D 0x03
76
+ #define cudaTextureTypeCubemap 0x0C
77
+ #define cudaTextureType1DLayered 0xF1
78
+ #define cudaTextureType2DLayered 0xF2
79
+ #define cudaTextureTypeCubemapLayered 0xFC
80
+
81
+ /**
82
+ * CUDA texture address modes
83
+ */
84
+ enum __device_builtin__ cudaTextureAddressMode
85
+ {
86
+ cudaAddressModeWrap = 0, /**< Wrapping address mode */
87
+ cudaAddressModeClamp = 1, /**< Clamp to edge address mode */
88
+ cudaAddressModeMirror = 2, /**< Mirror address mode */
89
+ cudaAddressModeBorder = 3 /**< Border address mode */
90
+ };
91
+
92
+ /**
93
+ * CUDA texture filter modes
94
+ */
95
+ enum __device_builtin__ cudaTextureFilterMode
96
+ {
97
+ cudaFilterModePoint = 0, /**< Point filter mode */
98
+ cudaFilterModeLinear = 1 /**< Linear filter mode */
99
+ };
100
+
101
+ /**
102
+ * CUDA texture read modes
103
+ */
104
+ enum __device_builtin__ cudaTextureReadMode
105
+ {
106
+ cudaReadModeElementType = 0, /**< Read texture as specified element type */
107
+ cudaReadModeNormalizedFloat = 1 /**< Read texture as normalized float */
108
+ };
109
+
110
+ /**
111
+ * CUDA texture descriptor
112
+ */
113
+ struct __device_builtin__ cudaTextureDesc
114
+ {
115
+ /**
116
+ * Texture address mode for up to 3 dimensions
117
+ */
118
+ enum cudaTextureAddressMode addressMode[3];
119
+ /**
120
+ * Texture filter mode
121
+ */
122
+ enum cudaTextureFilterMode filterMode;
123
+ /**
124
+ * Texture read mode
125
+ */
126
+ enum cudaTextureReadMode readMode;
127
+ /**
128
+ * Perform sRGB->linear conversion during texture read
129
+ */
130
+ int sRGB;
131
+ /**
132
+ * Texture Border Color
133
+ */
134
+ float borderColor[4];
135
+ /**
136
+ * Indicates whether texture reads are normalized or not
137
+ */
138
+ int normalizedCoords;
139
+ /**
140
+ * Limit to the anisotropy ratio
141
+ */
142
+ unsigned int maxAnisotropy;
143
+ /**
144
+ * Mipmap filter mode
145
+ */
146
+ enum cudaTextureFilterMode mipmapFilterMode;
147
+ /**
148
+ * Offset applied to the supplied mipmap level
149
+ */
150
+ float mipmapLevelBias;
151
+ /**
152
+ * Lower end of the mipmap level range to clamp access to
153
+ */
154
+ float minMipmapLevelClamp;
155
+ /**
156
+ * Upper end of the mipmap level range to clamp access to
157
+ */
158
+ float maxMipmapLevelClamp;
159
+ /**
160
+ * Disable any trilinear filtering optimizations.
161
+ */
162
+ int disableTrilinearOptimization;
163
+ /**
164
+ * Enable seamless cube map filtering.
165
+ */
166
+ int seamlessCubemap;
167
+ };
168
+
169
+ /**
170
+ * An opaque value that represents a CUDA texture object
171
+ */
172
+ typedef __device_builtin__ unsigned long long cudaTextureObject_t;
173
+
174
+ /** @} */
175
+ /** @} */ /* END CUDART_TYPES */
176
+
177
+ #endif /* !__TEXTURE_TYPES_H__ */
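
texture_types.h only declares the descriptor types; the texture object itself comes from the runtime API. Below is a minimal sketch, not part of the diff, assuming d_data is a device float buffer of n elements and with error checks omitted, showing how the enums and struct fields declared above are typically filled in.

#include <cuda_runtime.h>

cudaTextureObject_t make_point_sampled_texture(float *d_data, size_t n)
{
    cudaResourceDesc resDesc = {};                    // describe the backing memory
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_data;
    resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = n * sizeof(float);

    cudaTextureDesc texDesc = {};                     // describe how fetches behave
    texDesc.addressMode[0] = cudaAddressModeClamp;    // clamp out-of-range coordinates
    texDesc.filterMode    = cudaFilterModePoint;      // no interpolation
    texDesc.readMode      = cudaReadModeElementType;  // return raw float values
    texDesc.normalizedCoords = 0;                     // index with integer coordinates

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
    return tex;
}
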
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h ADDED
@@ -0,0 +1,175 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__VECTOR_FUNCTIONS_H__)
51
+ #define __VECTOR_FUNCTIONS_H__
52
+
53
+ /*******************************************************************************
54
+ * *
55
+ * *
56
+ * *
57
+ *******************************************************************************/
58
+
59
+ #include "cuda_runtime_api.h"
60
+
61
+ #if defined(__CUDACC_RTC__)
62
+ #define __VECTOR_FUNCTIONS_DECL__ __host__ __device__
63
+ #else /* !__CUDACC_RTC__ */
64
+ #define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
65
+ #endif /* __CUDACC_RTC__ */
66
+
67
+ /*******************************************************************************
68
+ * *
69
+ * *
70
+ * *
71
+ *******************************************************************************/
72
+
73
+ __VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x);
74
+
75
+ __VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x);
76
+
77
+ __VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y);
78
+
79
+ __VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y);
80
+
81
+ __VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z);
82
+
83
+ __VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z);
84
+
85
+ __VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w);
86
+
87
+ __VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w);
88
+
89
+ __VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x);
90
+
91
+ __VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x);
92
+
93
+ __VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y);
94
+
95
+ __VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y);
96
+
97
+ __VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x,short y, short z);
98
+
99
+ __VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z);
100
+
101
+ __VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w);
102
+
103
+ __VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w);
104
+
105
+ __VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x);
106
+
107
+ __VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x);
108
+
109
+ __VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y);
110
+
111
+ __VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y);
112
+
113
+ __VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z);
114
+
115
+ __VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z);
116
+
117
+ __VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w);
118
+
119
+ __VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w);
120
+
121
+ __VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x);
122
+
123
+ __VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x);
124
+
125
+ __VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y);
126
+
127
+ __VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y);
128
+
129
+ __VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z);
130
+
131
+ __VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z);
132
+
133
+ __VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w);
134
+
135
+ __VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w);
136
+
137
+ __VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x);
138
+
139
+ __VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y);
140
+
141
+ __VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z);
142
+
143
+ __VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w);
144
+
145
+ __VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x);
146
+
147
+ __VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x);
148
+
149
+ __VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y);
150
+
151
+ __VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y);
152
+
153
+ __VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z);
154
+
155
+ __VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z);
156
+
157
+ __VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w);
158
+
159
+ __VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w);
160
+
161
+ __VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x);
162
+
163
+ __VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y);
164
+
165
+ __VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z);
166
+
167
+ __VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w);
168
+
169
+ #undef __VECTOR_FUNCTIONS_DECL__
170
+
171
+ #if !defined(__CUDACC_RTC__)
172
+ #include "vector_functions.hpp"
173
+ #endif /* !__CUDACC_RTC__ */
174
+
175
+ #endif /* !__VECTOR_FUNCTIONS_H__ */
llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp ADDED
@@ -0,0 +1,316 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__VECTOR_FUNCTIONS_HPP__)
51
+ #define __VECTOR_FUNCTIONS_HPP__
52
+
53
+ /*******************************************************************************
54
+ * *
55
+ * *
56
+ * *
57
+ *******************************************************************************/
58
+
59
+ #include "cuda_runtime_api.h"
60
+
61
+ #if defined(__CUDACC_RTC__)
62
+ #define __VECTOR_FUNCTIONS_DECL__ __host__ __device__
63
+ #else /* !__CUDACC_RTC__ */
64
+ #define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
65
+ #endif /* __CUDACC_RTC__ */
66
+
67
+ /*******************************************************************************
68
+ * *
69
+ * *
70
+ * *
71
+ *******************************************************************************/
72
+
73
+ __VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x)
74
+ {
75
+ char1 t; t.x = x; return t;
76
+ }
77
+
78
+ __VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x)
79
+ {
80
+ uchar1 t; t.x = x; return t;
81
+ }
82
+
83
+ __VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y)
84
+ {
85
+ char2 t; t.x = x; t.y = y; return t;
86
+ }
87
+
88
+ __VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y)
89
+ {
90
+ uchar2 t; t.x = x; t.y = y; return t;
91
+ }
92
+
93
+ __VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z)
94
+ {
95
+ char3 t; t.x = x; t.y = y; t.z = z; return t;
96
+ }
97
+
98
+ __VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z)
99
+ {
100
+ uchar3 t; t.x = x; t.y = y; t.z = z; return t;
101
+ }
102
+
103
+ __VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w)
104
+ {
105
+ char4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
106
+ }
107
+
108
+ __VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w)
109
+ {
110
+ uchar4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
111
+ }
112
+
113
+ __VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x)
114
+ {
115
+ short1 t; t.x = x; return t;
116
+ }
117
+
118
+ __VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x)
119
+ {
120
+ ushort1 t; t.x = x; return t;
121
+ }
122
+
123
+ __VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y)
124
+ {
125
+ short2 t; t.x = x; t.y = y; return t;
126
+ }
127
+
128
+ __VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y)
129
+ {
130
+ ushort2 t; t.x = x; t.y = y; return t;
131
+ }
132
+
133
+ __VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x,short y, short z)
134
+ {
135
+ short3 t; t.x = x; t.y = y; t.z = z; return t;
136
+ }
137
+
138
+ __VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z)
139
+ {
140
+ ushort3 t; t.x = x; t.y = y; t.z = z; return t;
141
+ }
142
+
143
+ __VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w)
144
+ {
145
+ short4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
146
+ }
147
+
148
+ __VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w)
149
+ {
150
+ ushort4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
151
+ }
152
+
153
+ __VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x)
154
+ {
155
+ int1 t; t.x = x; return t;
156
+ }
157
+
158
+ __VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x)
159
+ {
160
+ uint1 t; t.x = x; return t;
161
+ }
162
+
163
+ __VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y)
164
+ {
165
+ int2 t; t.x = x; t.y = y; return t;
166
+ }
167
+
168
+ __VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y)
169
+ {
170
+ uint2 t; t.x = x; t.y = y; return t;
171
+ }
172
+
173
+ __VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z)
174
+ {
175
+ int3 t; t.x = x; t.y = y; t.z = z; return t;
176
+ }
177
+
178
+ __VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z)
179
+ {
180
+ uint3 t; t.x = x; t.y = y; t.z = z; return t;
181
+ }
182
+
183
+ __VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w)
184
+ {
185
+ int4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
186
+ }
187
+
188
+ __VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w)
189
+ {
190
+ uint4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
191
+ }
192
+
193
+ __VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x)
194
+ {
195
+ long1 t; t.x = x; return t;
196
+ }
197
+
198
+ __VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x)
199
+ {
200
+ ulong1 t; t.x = x; return t;
201
+ }
202
+
203
+ __VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y)
204
+ {
205
+ long2 t; t.x = x; t.y = y; return t;
206
+ }
207
+
208
+ __VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y)
209
+ {
210
+ ulong2 t; t.x = x; t.y = y; return t;
211
+ }
212
+
213
+ __VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z)
214
+ {
215
+ long3 t; t.x = x; t.y = y; t.z = z; return t;
216
+ }
217
+
218
+ __VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z)
219
+ {
220
+ ulong3 t; t.x = x; t.y = y; t.z = z; return t;
221
+ }
222
+
223
+ __VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w)
224
+ {
225
+ long4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
226
+ }
227
+
228
+ __VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w)
229
+ {
230
+ ulong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
231
+ }
232
+
233
+ __VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x)
234
+ {
235
+ float1 t; t.x = x; return t;
236
+ }
237
+
238
+ __VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y)
239
+ {
240
+ float2 t; t.x = x; t.y = y; return t;
241
+ }
242
+
243
+ __VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z)
244
+ {
245
+ float3 t; t.x = x; t.y = y; t.z = z; return t;
246
+ }
247
+
248
+ __VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w)
249
+ {
250
+ float4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
251
+ }
252
+
253
+ __VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x)
254
+ {
255
+ longlong1 t; t.x = x; return t;
256
+ }
257
+
258
+ __VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x)
259
+ {
260
+ ulonglong1 t; t.x = x; return t;
261
+ }
262
+
263
+ __VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y)
264
+ {
265
+ longlong2 t; t.x = x; t.y = y; return t;
266
+ }
267
+
268
+ __VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y)
269
+ {
270
+ ulonglong2 t; t.x = x; t.y = y; return t;
271
+ }
272
+
273
+ __VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z)
274
+ {
275
+ longlong3 t; t.x = x; t.y = y; t.z = z; return t;
276
+ }
277
+
278
+ __VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z)
279
+ {
280
+ ulonglong3 t; t.x = x; t.y = y; t.z = z; return t;
281
+ }
282
+
283
+ __VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w)
284
+ {
285
+ longlong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
286
+ }
287
+
288
+ __VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w)
289
+ {
290
+ ulonglong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
291
+ }
292
+
293
+ __VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x)
294
+ {
295
+ double1 t; t.x = x; return t;
296
+ }
297
+
298
+ __VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y)
299
+ {
300
+ double2 t; t.x = x; t.y = y; return t;
301
+ }
302
+
303
+ __VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z)
304
+ {
305
+ double3 t; t.x = x; t.y = y; t.z = z; return t;
306
+ }
307
+
308
+ __VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w)
309
+ {
310
+ double4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t;
311
+ }
312
+
313
+ #undef __VECTOR_FUNCTIONS_DECL__
314
+
315
+ #endif /* !__VECTOR_FUNCTIONS_HPP__ */
316
+
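
vector_functions.h declares the make_* constructors and vector_functions.hpp supplies their trivial definitions, so they are usable from both host and device code. A small illustrative sketch, with names chosen here only for the example:

#include <cuda_runtime.h>   // pulls in vector_functions.h and the vector types

__host__ __device__ inline float4 scale(float4 v, float s)
{
    return make_float4(v.x * s, v.y * s, v.z * s, v.w * s);
}

int main()
{
    float4 p = make_float4(1.0f, 2.0f, 3.0f, 4.0f);  // host-side construction
    float4 q = scale(p, 0.5f);
    return (q.w == 2.0f) ? 0 : 1;                    // 4.0f * 0.5f is exact in binary FP
}
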
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes).
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h ADDED
@@ -0,0 +1,78 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn : Neural Networks Library
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_H_)
55
+ #define CUDNN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+ #include "cudnn_adv_train.h"
65
+ #include "cudnn_cnn_infer.h"
66
+ #include "cudnn_cnn_train.h"
67
+
68
+ #include "cudnn_backend.h"
69
+
70
+ #if defined(__cplusplus)
71
+ extern "C" {
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ }
76
+ #endif
77
+
78
+ #endif /* CUDNN_H_ */
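
cudnn.h is the umbrella header: it pulls in the versioned sub-headers (ops, adv, cnn, backend), so user code normally includes only this file and links against libcudnn. A minimal smoke test under that assumption, with error handling kept to a single check:

#include <cudnn.h>
#include <stdio.h>

int main(void)
{
    printf("built against cuDNN %d.%d.%d, loaded runtime reports %zu\n",
           CUDNN_MAJOR, CUDNN_MINOR, CUDNN_PATCHLEVEL, cudnnGetVersion());

    cudnnHandle_t handle;
    cudnnStatus_t st = cudnnCreate(&handle);          // allocate the library context
    if (st != CUDNN_STATUS_SUCCESS) {
        fprintf(stderr, "cudnnCreate failed: %s\n", cudnnGetErrorString(st));
        return 1;
    }
    cudnnDestroy(handle);                             // release the context
    return 0;
}
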
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h ADDED
@@ -0,0 +1,658 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_infer : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_INFER_H_)
55
+ #define CUDNN_ADV_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_ADV_INFER_MAJOR 8
65
+ #define CUDNN_ADV_INFER_MINOR 9
66
+ #define CUDNN_ADV_INFER_PATCH 2
67
+
68
+ #if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN ADV INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* BASIC RNN API */
78
+
79
+ typedef enum {
80
+ CUDNN_FWD_MODE_INFERENCE = 0,
81
+ CUDNN_FWD_MODE_TRAINING = 1,
82
+ } cudnnForwardMode_t;
83
+
84
+ typedef enum {
85
+ CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */
86
+ CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */
87
+ CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */
88
+ CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */
89
+ } cudnnRNNMode_t;
90
+
91
+ typedef enum {
92
+ CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */
93
+ CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */
94
+ CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */
95
+ CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */
96
+ } cudnnRNNBiasMode_t;
97
+
98
+ typedef enum {
99
+ CUDNN_UNIDIRECTIONAL = 0, /* single direction network */
100
+ CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */
101
+ } cudnnDirectionMode_t;
102
+
103
+ typedef enum {
104
+ CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */
105
+ CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */
106
+ } cudnnRNNInputMode_t;
107
+
108
+ typedef enum {
109
+ CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */
110
+ CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */
111
+ } cudnnRNNClipMode_t;
112
+
113
+ typedef enum {
114
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */
115
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */
116
+ CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */
117
+ } cudnnRNNDataLayout_t;
118
+
119
+ /* Legacy type for backward compatibility */
120
+ typedef unsigned cudnnRNNPaddingMode_t;
121
+
122
+ /* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */
123
+ #define CUDNN_RNN_PADDED_IO_DISABLED 0
124
+ #define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0)
125
+
126
+ struct cudnnRNNStruct;
127
+ typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t;
128
+
129
+ struct cudnnPersistentRNNPlan;
130
+ typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t;
131
+
132
+ struct cudnnRNNDataStruct;
133
+ typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t;
134
+
135
+ cudnnStatus_t CUDNNWINAPI
136
+ cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc);
137
+
138
+ cudnnStatus_t CUDNNWINAPI
139
+ cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
143
+ cudnnRNNAlgo_t algo,
144
+ cudnnRNNMode_t cellMode,
145
+ cudnnRNNBiasMode_t biasMode,
146
+ cudnnDirectionMode_t dirMode,
147
+ cudnnRNNInputMode_t inputMode,
148
+ cudnnDataType_t dataType,
149
+ cudnnDataType_t mathPrec,
150
+ cudnnMathType_t mathType,
151
+ int32_t inputSize,
152
+ int32_t hiddenSize,
153
+ int32_t projSize,
154
+ int32_t numLayers,
155
+ cudnnDropoutDescriptor_t dropoutDesc,
156
+ uint32_t auxFlags);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
160
+ cudnnRNNAlgo_t *algo,
161
+ cudnnRNNMode_t *cellMode,
162
+ cudnnRNNBiasMode_t *biasMode,
163
+ cudnnDirectionMode_t *dirMode,
164
+ cudnnRNNInputMode_t *inputMode,
165
+ cudnnDataType_t *dataType,
166
+ cudnnDataType_t *mathPrec,
167
+ cudnnMathType_t *mathType,
168
+ int32_t *inputSize,
169
+ int32_t *hiddenSize,
170
+ int32_t *projSize,
171
+ int32_t *numLayers,
172
+ cudnnDropoutDescriptor_t *dropoutDesc,
173
+ uint32_t *auxFlags);
174
+
175
+ /*
176
+ * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision
177
+ * compute precision is further modified by cudnnSetRNNMatrixMathType()
178
+ * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage
179
+ * dropout is between RNN layers, not between recurrent steps
180
+ */
181
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
182
+ cudnnSetRNNDescriptor_v6(cudnnHandle_t handle,
183
+ cudnnRNNDescriptor_t rnnDesc,
184
+ const int hiddenSize,
185
+ const int numLayers,
186
+ cudnnDropoutDescriptor_t dropoutDesc,
187
+ cudnnRNNInputMode_t inputMode,
188
+ cudnnDirectionMode_t direction,
189
+ cudnnRNNMode_t cellMode,
190
+ cudnnRNNAlgo_t algo,
191
+ cudnnDataType_t mathPrec);
192
+
193
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
194
+ cudnnGetRNNDescriptor_v6(cudnnHandle_t handle,
195
+ cudnnRNNDescriptor_t rnnDesc,
196
+ int *hiddenSize,
197
+ int *numLayers,
198
+ cudnnDropoutDescriptor_t *dropoutDesc,
199
+ cudnnRNNInputMode_t *inputMode,
200
+ cudnnDirectionMode_t *direction,
201
+ cudnnRNNMode_t *cellMode,
202
+ cudnnRNNAlgo_t *algo,
203
+ cudnnDataType_t *mathPrec);
204
+
205
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
206
+ cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType);
207
+
208
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
209
+ cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType);
210
+
211
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
212
+ cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode);
213
+
214
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
215
+ cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode);
216
+
217
+ cudnnStatus_t CUDNNWINAPI
218
+ cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc,
219
+ cudnnRNNClipMode_t clipMode,
220
+ cudnnNanPropagation_t clipNanOpt,
221
+ double lclip,
222
+ double rclip);
223
+
224
+ cudnnStatus_t CUDNNWINAPI
225
+ cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc,
226
+ cudnnRNNClipMode_t *clipMode,
227
+ cudnnNanPropagation_t *clipNanOpt,
228
+ double *lclip,
229
+ double *rclip);
230
+
231
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
232
+ cudnnRNNSetClip(cudnnHandle_t handle,
233
+ cudnnRNNDescriptor_t rnnDesc,
234
+ cudnnRNNClipMode_t clipMode,
235
+ cudnnNanPropagation_t clipNanOpt,
236
+ double lclip,
237
+ double rclip);
238
+
239
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
240
+ cudnnRNNGetClip(cudnnHandle_t handle,
241
+ cudnnRNNDescriptor_t rnnDesc,
242
+ cudnnRNNClipMode_t *clipMode,
243
+ cudnnNanPropagation_t *clipNanOpt,
244
+ double *lclip,
245
+ double *rclip);
246
+
247
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetRNNProjectionLayers(cudnnHandle_t handle,
249
+ cudnnRNNDescriptor_t rnnDesc,
250
+ const int recProjSize,
251
+ const int outProjSize);
252
+
253
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
254
+ cudnnGetRNNProjectionLayers(cudnnHandle_t handle,
255
+ const cudnnRNNDescriptor_t rnnDesc,
256
+ int *recProjSize,
257
+ int *outProjSize);
258
+
259
+ /* Expensive. Creates the plan for the specific settings. */
260
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
261
+ cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc,
262
+ const int minibatch,
263
+ const cudnnDataType_t dataType,
264
+ cudnnPersistentRNNPlan_t *plan);
265
+
266
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
267
+ cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan);
268
+
269
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
270
+ cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan);
271
+
272
+ cudnnStatus_t CUDNNWINAPI
273
+ cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch);
274
+
275
+ /* dataType in weight descriptors and input descriptors is used to describe storage */
276
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
277
+ cudnnGetRNNWorkspaceSize(cudnnHandle_t handle,
278
+ const cudnnRNNDescriptor_t rnnDesc,
279
+ const int seqLength,
280
+ const cudnnTensorDescriptor_t *xDesc,
281
+ size_t *sizeInBytes);
282
+
283
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
284
+ cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle,
285
+ const cudnnRNNDescriptor_t rnnDesc,
286
+ const int seqLength,
287
+ const cudnnTensorDescriptor_t *xDesc,
288
+ size_t *sizeInBytes);
289
+
290
+ cudnnStatus_t CUDNNWINAPI
291
+ cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle,
292
+ cudnnRNNDescriptor_t rnnDesc,
293
+ cudnnForwardMode_t fwdMode,
294
+ cudnnRNNDataDescriptor_t xDesc,
295
+ size_t *workSpaceSize,
296
+ size_t *reserveSpaceSize);
297
+
298
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
299
+ cudnnGetRNNParamsSize(cudnnHandle_t handle,
300
+ const cudnnRNNDescriptor_t rnnDesc,
301
+ const cudnnTensorDescriptor_t xDesc,
302
+ size_t *sizeInBytes,
303
+ cudnnDataType_t dataType);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize);
307
+
308
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
309
+ cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle,
310
+ const cudnnRNNDescriptor_t rnnDesc,
311
+ const int pseudoLayer,
312
+ const cudnnTensorDescriptor_t xDesc,
313
+ const cudnnFilterDescriptor_t wDesc,
314
+ const void *w,
315
+ const int linLayerID,
316
+ cudnnFilterDescriptor_t linLayerMatDesc,
317
+ void **linLayerMat);
318
+
319
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
320
+ cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle,
321
+ const cudnnRNNDescriptor_t rnnDesc,
322
+ const int pseudoLayer,
323
+ const cudnnTensorDescriptor_t xDesc,
324
+ const cudnnFilterDescriptor_t wDesc,
325
+ const void *w,
326
+ const int linLayerID,
327
+ cudnnFilterDescriptor_t linLayerBiasDesc,
328
+ void **linLayerBias);
329
+
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnGetRNNWeightParams(cudnnHandle_t handle,
332
+ cudnnRNNDescriptor_t rnnDesc,
333
+ int32_t pseudoLayer,
334
+ size_t weightSpaceSize,
335
+ const void *weightSpace,
336
+ int32_t linLayerID,
337
+ cudnnTensorDescriptor_t mDesc,
338
+ void **mAddr,
339
+ cudnnTensorDescriptor_t bDesc,
340
+ void **bAddr);
341
+
342
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
343
+ cudnnRNNForwardInference(cudnnHandle_t handle,
344
+ const cudnnRNNDescriptor_t rnnDesc,
345
+ const int seqLength,
346
+ const cudnnTensorDescriptor_t *xDesc,
347
+ const void *x,
348
+ const cudnnTensorDescriptor_t hxDesc,
349
+ const void *hx,
350
+ const cudnnTensorDescriptor_t cxDesc,
351
+ const void *cx,
352
+ const cudnnFilterDescriptor_t wDesc,
353
+ const void *w,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ void *y,
356
+ const cudnnTensorDescriptor_t hyDesc,
357
+ void *hy,
358
+ const cudnnTensorDescriptor_t cyDesc,
359
+ void *cy,
360
+ void *workSpace,
361
+ size_t workSpaceSizeInBytes);
362
+
363
+ /* RNN EX API */
364
+
365
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
366
+ cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode);
367
+
368
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
369
+ cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode);
370
+
371
+ cudnnStatus_t CUDNNWINAPI
372
+ cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc);
373
+
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc);
376
+
377
+ cudnnStatus_t CUDNNWINAPI
378
+ cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
379
+ cudnnDataType_t dataType,
380
+ cudnnRNNDataLayout_t layout,
381
+ int maxSeqLength,
382
+ int batchSize,
383
+ int vectorSize,
384
+ const int seqLengthArray[], /* length of each sequence in the batch */
385
+ void *paddingFill); /* symbol for filling padding position in output */
386
+
387
+ cudnnStatus_t CUDNNWINAPI
388
+ cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
389
+ cudnnDataType_t *dataType,
390
+ cudnnRNNDataLayout_t *layout,
391
+ int *maxSeqLength,
392
+ int *batchSize,
393
+ int *vectorSize,
394
+ int arrayLengthRequested,
395
+ int seqLengthArray[],
396
+ void *paddingFill);
397
+
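A rough illustration of the create/set pair above; the batch size, per-sequence lengths, vector size, and padding value are made-up assumptions for the sketch.

#include <cudnn.h>

/* Hedged sketch: describe a padded, batch-major batch of 3 sequences whose
 * lengths are assumed; each time step carries a 128-element float vector. */
static cudnnStatus_t setup_rnn_data(cudnnRNNDataDescriptor_t *xDesc) {
    const int seqLengthArray[3] = {5, 3, 4};  /* per-sequence lengths (assumed) */
    float paddingFill = 0.0f;                 /* value written into padding slots */
    cudnnStatus_t st = cudnnCreateRNNDataDescriptor(xDesc);
    if (st != CUDNN_STATUS_SUCCESS) return st;
    return cudnnSetRNNDataDescriptor(*xDesc,
                                     CUDNN_DATA_FLOAT,
                                     CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED,
                                     5,              /* maxSeqLength */
                                     3,              /* batchSize */
                                     128,            /* vectorSize */
                                     seqLengthArray,
                                     &paddingFill);
}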
398
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
399
+ cudnnRNNForwardInferenceEx(cudnnHandle_t handle,
400
+ const cudnnRNNDescriptor_t rnnDesc,
401
+ const cudnnRNNDataDescriptor_t xDesc,
402
+ const void *x,
403
+ const cudnnTensorDescriptor_t hxDesc,
404
+ const void *hx,
405
+ const cudnnTensorDescriptor_t cxDesc,
406
+ const void *cx,
407
+ const cudnnFilterDescriptor_t wDesc,
408
+ const void *w,
409
+ const cudnnRNNDataDescriptor_t yDesc,
410
+ void *y,
411
+ const cudnnTensorDescriptor_t hyDesc,
412
+ void *hy,
413
+ const cudnnTensorDescriptor_t cyDesc,
414
+ void *cy,
415
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
416
+ const void *keys, /* reserved, should pass NULL */
417
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
418
+ void *cAttn, /* reserved, should pass NULL */
419
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
420
+ void *iAttn, /* reserved, should pass NULL */
421
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
422
+ void *queries, /* reserved, should pass NULL */
423
+ void *workSpace,
424
+ size_t workSpaceSizeInBytes);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnRNNForward(cudnnHandle_t handle,
428
+ cudnnRNNDescriptor_t rnnDesc,
429
+ cudnnForwardMode_t fwdMode,
430
+ const int32_t devSeqLengths[],
431
+ cudnnRNNDataDescriptor_t xDesc,
432
+ const void *x,
433
+ cudnnRNNDataDescriptor_t yDesc,
434
+ void *y,
435
+ cudnnTensorDescriptor_t hDesc,
436
+ const void *hx,
437
+ void *hy,
438
+ cudnnTensorDescriptor_t cDesc,
439
+ const void *cx,
440
+ void *cy,
441
+ size_t weightSpaceSize,
442
+ const void *weightSpace,
443
+ size_t workSpaceSize,
444
+ void *workSpace,
445
+ size_t reserveSpaceSize,
446
+ void *reserveSpace);
447
+
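For orientation only, the sketch below chains cudnnGetRNNTempSpaceSizes with cudnnRNNForward in inference mode; all descriptors, the device buffers x/y/devSeqLengths, and the packed weight space are assumed to be prepared by the caller, and error handling is mostly omitted.

#include <cudnn.h>
#include <cuda_runtime.h>
#include <stdint.h>

/* Hedged sketch: inference-mode forward pass with a workspace sized by
 * cudnnGetRNNTempSpaceSizes. NULL hx/cx mean zero initial state; NULL hy/cy
 * mean the final state is not written back. */
static cudnnStatus_t run_rnn_inference(cudnnHandle_t handle,
                                       cudnnRNNDescriptor_t rnnDesc,
                                       cudnnRNNDataDescriptor_t xDesc, const void *x,
                                       cudnnRNNDataDescriptor_t yDesc, void *y,
                                       cudnnTensorDescriptor_t hDesc,
                                       cudnnTensorDescriptor_t cDesc,
                                       const int32_t *devSeqLengths,
                                       size_t weightSpaceSize, const void *weightSpace) {
    size_t workSpaceSize = 0, reserveSpaceSize = 0;
    void *workSpace = NULL;
    cudnnStatus_t st;

    /* In inference mode the reserve-space size is reported as zero. */
    cudnnGetRNNTempSpaceSizes(handle, rnnDesc, CUDNN_FWD_MODE_INFERENCE,
                              xDesc, &workSpaceSize, &reserveSpaceSize);
    cudaMalloc(&workSpace, workSpaceSize);

    st = cudnnRNNForward(handle, rnnDesc, CUDNN_FWD_MODE_INFERENCE, devSeqLengths,
                         xDesc, x, yDesc, y,
                         hDesc, NULL /* hx */, NULL /* hy */,
                         cDesc, NULL /* cx */, NULL /* cy */,
                         weightSpaceSize, weightSpace,
                         workSpaceSize, workSpace,
                         reserveSpaceSize, NULL /* no reserve space at inference */);

    cudaFree(workSpace);
    return st;
}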
448
+ /* RNN FIND API */
449
+
450
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
451
+ cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc);
452
+
453
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
454
+ cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
455
+
456
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
457
+ cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle,
458
+ const cudnnRNNDescriptor_t rnnDesc,
459
+ const int seqLength,
460
+ const cudnnTensorDescriptor_t *xDesc,
461
+ const void *x,
462
+ const cudnnTensorDescriptor_t hxDesc,
463
+ const void *hx,
464
+ const cudnnTensorDescriptor_t cxDesc,
465
+ const void *cx,
466
+ const cudnnFilterDescriptor_t wDesc,
467
+ const void *w,
468
+ const cudnnTensorDescriptor_t *yDesc,
469
+ void *y,
470
+ const cudnnTensorDescriptor_t hyDesc,
471
+ void *hy,
472
+ const cudnnTensorDescriptor_t cyDesc,
473
+ void *cy,
474
+ const float findIntensity,
475
+ const int requestedAlgoCount,
476
+ int *returnedAlgoCount,
477
+ cudnnAlgorithmPerformance_t *perfResults,
478
+ void *workspace,
479
+ size_t workSpaceSizeInBytes);
480
+
481
+ /* Sequence data descriptor */
482
+
483
+ typedef enum {
484
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */
485
+ CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */
486
+ CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */
487
+ CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */
488
+ } cudnnSeqDataAxis_t;
489
+
490
+ struct cudnnSeqDataStruct;
491
+ typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t;
492
+
493
+ #define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */
494
+
495
+ cudnnStatus_t CUDNNWINAPI
496
+ cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc);
497
+
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc);
500
+
501
+ cudnnStatus_t CUDNNWINAPI
502
+ cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc,
503
+ cudnnDataType_t dataType,
504
+ int nbDims,
505
+ const int dimA[],
506
+ const cudnnSeqDataAxis_t axes[],
507
+ size_t seqLengthArraySize,
508
+ const int seqLengthArray[],
509
+ void *paddingFill);
510
+
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc,
513
+ cudnnDataType_t *dataType,
514
+ int *nbDims,
515
+ int nbDimsRequested,
516
+ int dimA[],
517
+ cudnnSeqDataAxis_t axes[],
518
+ size_t *seqLengthArraySize,
519
+ size_t seqLengthSizeRequested,
520
+ int seqLengthArray[],
521
+ void *paddingFill);
522
+
523
+ /* Multihead Attention */
524
+
525
+ /* Legacy type for backward compatibility */
526
+ typedef unsigned cudnnAttnQueryMap_t;
527
+
528
+ /*
529
+ * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor().
530
+ * Use the bitwise OR operator to combine several settings listed below. Additional
531
+ * minor options can be added here w/o changing or introducing new API functions.
532
+ */
533
+ #define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */
534
+ #define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */
535
+ #define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */
536
+ #define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */
537
+
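The comment above notes that these options are combined with the bitwise OR operator before being passed as 'attnMode'; a minimal illustration (the particular combination chosen here is arbitrary):

/* Illustration only: one-to-one query-to-(K,V) mapping with projection biases enabled. */
unsigned attnMode = CUDNN_ATTN_QUERYMAP_ONE_TO_ONE | CUDNN_ATTN_ENABLE_PROJ_BIASES;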
538
+ struct cudnnAttnStruct;
539
+ typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t;
540
+
541
+ cudnnStatus_t CUDNNWINAPI
542
+ cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc);
543
+
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
549
+ unsigned attnMode,
550
+ int nHeads,
551
+ double smScaler,
552
+ cudnnDataType_t dataType,
553
+ cudnnDataType_t computePrec,
554
+ cudnnMathType_t mathType,
555
+ cudnnDropoutDescriptor_t attnDropoutDesc,
556
+ cudnnDropoutDescriptor_t postDropoutDesc,
557
+ int qSize,
558
+ int kSize,
559
+ int vSize,
560
+ int qProjSize,
561
+ int kProjSize,
562
+ int vProjSize,
563
+ int oProjSize,
564
+ int qoMaxSeqLength,
565
+ int kvMaxSeqLength,
566
+ int maxBatchSize,
567
+ int maxBeamSize);
568
+
569
+ cudnnStatus_t CUDNNWINAPI
570
+ cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
571
+ unsigned *attnMode,
572
+ int *nHeads,
573
+ double *smScaler,
574
+ cudnnDataType_t *dataType,
575
+ cudnnDataType_t *computePrec,
576
+ cudnnMathType_t *mathType,
577
+ cudnnDropoutDescriptor_t *attnDropoutDesc,
578
+ cudnnDropoutDescriptor_t *postDropoutDesc,
579
+ int *qSize,
580
+ int *kSize,
581
+ int *vSize,
582
+ int *qProjSize,
583
+ int *kProjSize,
584
+ int *vProjSize,
585
+ int *oProjSize,
586
+ int *qoMaxSeqLength,
587
+ int *kvMaxSeqLength,
588
+ int *maxBatchSize,
589
+ int *maxBeamSize);
590
+
591
+ cudnnStatus_t CUDNNWINAPI
592
+ cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle,
593
+ const cudnnAttnDescriptor_t attnDesc,
594
+ size_t *weightSizeInBytes,
595
+ size_t *workSpaceSizeInBytes,
596
+ size_t *reserveSpaceSizeInBytes);
597
+
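As a loose sketch of how the buffer query above is typically used, the snippet below sizes and allocates the three multi-head attention buffers; the attention descriptor is assumed to have been configured with cudnnSetAttnDescriptor() beforehand, and error handling on the CUDA allocations is omitted.

#include <cudnn.h>
#include <cuda_runtime.h>

/* Hedged sketch: query buffer sizes for a configured attention descriptor
 * and allocate device memory for them. */
static cudnnStatus_t alloc_attn_buffers(cudnnHandle_t handle,
                                        cudnnAttnDescriptor_t attnDesc,
                                        void **weights, void **workSpace, void **reserveSpace,
                                        size_t *weightSize, size_t *workSpaceSize,
                                        size_t *reserveSpaceSize) {
    cudnnStatus_t st = cudnnGetMultiHeadAttnBuffers(handle, attnDesc,
                                                    weightSize, workSpaceSize,
                                                    reserveSpaceSize);
    if (st != CUDNN_STATUS_SUCCESS) return st;
    cudaMalloc(weights, *weightSize);
    cudaMalloc(workSpace, *workSpaceSize);
    if (*reserveSpaceSize > 0)              /* zero when only inference is planned */
        cudaMalloc(reserveSpace, *reserveSpaceSize);
    return CUDNN_STATUS_SUCCESS;
}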
598
+ typedef enum {
599
+ CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */
600
+ CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */
601
+ CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */
602
+ CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */
603
+ CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */
604
+ CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */
605
+ CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */
606
+ CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */
607
+ } cudnnMultiHeadAttnWeightKind_t;
608
+
609
+ #define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */
610
+
611
+ cudnnStatus_t CUDNNWINAPI
612
+ cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle,
613
+ const cudnnAttnDescriptor_t attnDesc,
614
+ cudnnMultiHeadAttnWeightKind_t wKind,
615
+ size_t weightSizeInBytes,
616
+ const void *weights,
617
+ cudnnTensorDescriptor_t wDesc,
618
+ void **wAddr);
619
+
620
+ cudnnStatus_t CUDNNWINAPI
621
+ cudnnMultiHeadAttnForward(cudnnHandle_t handle,
622
+ const cudnnAttnDescriptor_t attnDesc,
623
+ int currIdx,
624
+ const int loWinIdx[],
625
+ const int hiWinIdx[],
626
+ const int devSeqLengthsQO[],
627
+ const int devSeqLengthsKV[],
628
+ const cudnnSeqDataDescriptor_t qDesc,
629
+ const void *queries,
630
+ const void *residuals,
631
+ const cudnnSeqDataDescriptor_t kDesc,
632
+ const void *keys,
633
+ const cudnnSeqDataDescriptor_t vDesc,
634
+ const void *values,
635
+ const cudnnSeqDataDescriptor_t oDesc,
636
+ void *out,
637
+ size_t weightSizeInBytes,
638
+ const void *weights,
639
+ size_t workSpaceSizeInBytes,
640
+ void *workSpace,
641
+ size_t reserveSpaceSizeInBytes,
642
+ void *reserveSpace);
643
+
644
+ /*
645
+ * \brief Cross-library version checker.
646
+ * This function is implemented differently in each sub-library. Each sublib
647
+ * checks whether its own version matches that of its dependencies.
648
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
649
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
650
+ */
651
+ cudnnStatus_t CUDNNWINAPI
652
+ cudnnAdvInferVersionCheck(void);
653
+
654
+ #if defined(__cplusplus)
655
+ }
656
+ #endif
657
+
658
+ #endif /* CUDNN_ADV_INFER_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h ADDED
@@ -0,0 +1,658 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_infer : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_INFER_H_)
55
+ #define CUDNN_ADV_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_ADV_INFER_MAJOR 8
65
+ #define CUDNN_ADV_INFER_MINOR 9
66
+ #define CUDNN_ADV_INFER_PATCH 2
67
+
68
+ #if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN ADV INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* BASIC RNN API */
78
+
79
+ typedef enum {
80
+ CUDNN_FWD_MODE_INFERENCE = 0,
81
+ CUDNN_FWD_MODE_TRAINING = 1,
82
+ } cudnnForwardMode_t;
83
+
84
+ typedef enum {
85
+ CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLU activation */
86
+ CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */
87
+ CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */
88
+ CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */
89
+ } cudnnRNNMode_t;
90
+
91
+ typedef enum {
92
+ CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */
93
+ CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */
94
+ CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */
95
+ CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */
96
+ } cudnnRNNBiasMode_t;
97
+
98
+ typedef enum {
99
+ CUDNN_UNIDIRECTIONAL = 0, /* single direction network */
100
+ CUDNN_BIDIRECTIONAL = 1, /* output concatenation at each layer */
101
+ } cudnnDirectionMode_t;
102
+
103
+ typedef enum {
104
+ CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */
105
+ CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */
106
+ } cudnnRNNInputMode_t;
107
+
108
+ typedef enum {
109
+ CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */
110
+ CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */
111
+ } cudnnRNNClipMode_t;
112
+
113
+ typedef enum {
114
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */
115
+ CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */
116
+ CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */
117
+ } cudnnRNNDataLayout_t;
118
+
119
+ /* Legacy type for backward compatibility */
120
+ typedef unsigned cudnnRNNPaddingMode_t;
121
+
122
+ /* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */
123
+ #define CUDNN_RNN_PADDED_IO_DISABLED 0
124
+ #define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0)
125
+
126
+ struct cudnnRNNStruct;
127
+ typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t;
128
+
129
+ struct cudnnPersistentRNNPlan;
130
+ typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t;
131
+
132
+ struct cudnnRNNDataStruct;
133
+ typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t;
134
+
135
+ cudnnStatus_t CUDNNWINAPI
136
+ cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc);
137
+
138
+ cudnnStatus_t CUDNNWINAPI
139
+ cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
143
+ cudnnRNNAlgo_t algo,
144
+ cudnnRNNMode_t cellMode,
145
+ cudnnRNNBiasMode_t biasMode,
146
+ cudnnDirectionMode_t dirMode,
147
+ cudnnRNNInputMode_t inputMode,
148
+ cudnnDataType_t dataType,
149
+ cudnnDataType_t mathPrec,
150
+ cudnnMathType_t mathType,
151
+ int32_t inputSize,
152
+ int32_t hiddenSize,
153
+ int32_t projSize,
154
+ int32_t numLayers,
155
+ cudnnDropoutDescriptor_t dropoutDesc,
156
+ uint32_t auxFlags);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc,
160
+ cudnnRNNAlgo_t *algo,
161
+ cudnnRNNMode_t *cellMode,
162
+ cudnnRNNBiasMode_t *biasMode,
163
+ cudnnDirectionMode_t *dirMode,
164
+ cudnnRNNInputMode_t *inputMode,
165
+ cudnnDataType_t *dataType,
166
+ cudnnDataType_t *mathPrec,
167
+ cudnnMathType_t *mathType,
168
+ int32_t *inputSize,
169
+ int32_t *hiddenSize,
170
+ int32_t *projSize,
171
+ int32_t *numLayers,
172
+ cudnnDropoutDescriptor_t *dropoutDesc,
173
+ uint32_t *auxFlags);
174
+
175
+ /*
176
+ * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision
177
+ * compute precision is further modified by cudnnSetRNNMatrixMathType()
178
+ * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage
179
+ * dropout is between RNN layers, not between recurrent steps
180
+ */
181
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
182
+ cudnnSetRNNDescriptor_v6(cudnnHandle_t handle,
183
+ cudnnRNNDescriptor_t rnnDesc,
184
+ const int hiddenSize,
185
+ const int numLayers,
186
+ cudnnDropoutDescriptor_t dropoutDesc,
187
+ cudnnRNNInputMode_t inputMode,
188
+ cudnnDirectionMode_t direction,
189
+ cudnnRNNMode_t cellMode,
190
+ cudnnRNNAlgo_t algo,
191
+ cudnnDataType_t mathPrec);
192
+
193
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
194
+ cudnnGetRNNDescriptor_v6(cudnnHandle_t handle,
195
+ cudnnRNNDescriptor_t rnnDesc,
196
+ int *hiddenSize,
197
+ int *numLayers,
198
+ cudnnDropoutDescriptor_t *dropoutDesc,
199
+ cudnnRNNInputMode_t *inputMode,
200
+ cudnnDirectionMode_t *direction,
201
+ cudnnRNNMode_t *cellMode,
202
+ cudnnRNNAlgo_t *algo,
203
+ cudnnDataType_t *mathPrec);
204
+
205
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
206
+ cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType);
207
+
208
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
209
+ cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType);
210
+
211
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
212
+ cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode);
213
+
214
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
215
+ cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode);
216
+
217
+ cudnnStatus_t CUDNNWINAPI
218
+ cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc,
219
+ cudnnRNNClipMode_t clipMode,
220
+ cudnnNanPropagation_t clipNanOpt,
221
+ double lclip,
222
+ double rclip);
223
+
224
+ cudnnStatus_t CUDNNWINAPI
225
+ cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc,
226
+ cudnnRNNClipMode_t *clipMode,
227
+ cudnnNanPropagation_t *clipNanOpt,
228
+ double *lclip,
229
+ double *rclip);
230
+
231
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
232
+ cudnnRNNSetClip(cudnnHandle_t handle,
233
+ cudnnRNNDescriptor_t rnnDesc,
234
+ cudnnRNNClipMode_t clipMode,
235
+ cudnnNanPropagation_t clipNanOpt,
236
+ double lclip,
237
+ double rclip);
238
+
239
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
240
+ cudnnRNNGetClip(cudnnHandle_t handle,
241
+ cudnnRNNDescriptor_t rnnDesc,
242
+ cudnnRNNClipMode_t *clipMode,
243
+ cudnnNanPropagation_t *clipNanOpt,
244
+ double *lclip,
245
+ double *rclip);
246
+
247
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetRNNProjectionLayers(cudnnHandle_t handle,
249
+ cudnnRNNDescriptor_t rnnDesc,
250
+ const int recProjSize,
251
+ const int outProjSize);
252
+
253
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
254
+ cudnnGetRNNProjectionLayers(cudnnHandle_t handle,
255
+ const cudnnRNNDescriptor_t rnnDesc,
256
+ int *recProjSize,
257
+ int *outProjSize);
258
+
259
+ /* Expensive. Creates the plan for the specific settings. */
260
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
261
+ cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc,
262
+ const int minibatch,
263
+ const cudnnDataType_t dataType,
264
+ cudnnPersistentRNNPlan_t *plan);
265
+
266
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
267
+ cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan);
268
+
269
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
270
+ cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan);
271
+
272
+ cudnnStatus_t CUDNNWINAPI
273
+ cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch);
274
+
275
+ /* dataType in weight descriptors and input descriptors is used to describe storage */
276
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
277
+ cudnnGetRNNWorkspaceSize(cudnnHandle_t handle,
278
+ const cudnnRNNDescriptor_t rnnDesc,
279
+ const int seqLength,
280
+ const cudnnTensorDescriptor_t *xDesc,
281
+ size_t *sizeInBytes);
282
+
283
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
284
+ cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle,
285
+ const cudnnRNNDescriptor_t rnnDesc,
286
+ const int seqLength,
287
+ const cudnnTensorDescriptor_t *xDesc,
288
+ size_t *sizeInBytes);
289
+
290
+ cudnnStatus_t CUDNNWINAPI
291
+ cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle,
292
+ cudnnRNNDescriptor_t rnnDesc,
293
+ cudnnForwardMode_t fwdMode,
294
+ cudnnRNNDataDescriptor_t xDesc,
295
+ size_t *workSpaceSize,
296
+ size_t *reserveSpaceSize);
297
+
298
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
299
+ cudnnGetRNNParamsSize(cudnnHandle_t handle,
300
+ const cudnnRNNDescriptor_t rnnDesc,
301
+ const cudnnTensorDescriptor_t xDesc,
302
+ size_t *sizeInBytes,
303
+ cudnnDataType_t dataType);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize);
307
+
308
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
309
+ cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle,
310
+ const cudnnRNNDescriptor_t rnnDesc,
311
+ const int pseudoLayer,
312
+ const cudnnTensorDescriptor_t xDesc,
313
+ const cudnnFilterDescriptor_t wDesc,
314
+ const void *w,
315
+ const int linLayerID,
316
+ cudnnFilterDescriptor_t linLayerMatDesc,
317
+ void **linLayerMat);
318
+
319
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
320
+ cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle,
321
+ const cudnnRNNDescriptor_t rnnDesc,
322
+ const int pseudoLayer,
323
+ const cudnnTensorDescriptor_t xDesc,
324
+ const cudnnFilterDescriptor_t wDesc,
325
+ const void *w,
326
+ const int linLayerID,
327
+ cudnnFilterDescriptor_t linLayerBiasDesc,
328
+ void **linLayerBias);
329
+
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnGetRNNWeightParams(cudnnHandle_t handle,
332
+ cudnnRNNDescriptor_t rnnDesc,
333
+ int32_t pseudoLayer,
334
+ size_t weightSpaceSize,
335
+ const void *weightSpace,
336
+ int32_t linLayerID,
337
+ cudnnTensorDescriptor_t mDesc,
338
+ void **mAddr,
339
+ cudnnTensorDescriptor_t bDesc,
340
+ void **bAddr);
341
+
342
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
343
+ cudnnRNNForwardInference(cudnnHandle_t handle,
344
+ const cudnnRNNDescriptor_t rnnDesc,
345
+ const int seqLength,
346
+ const cudnnTensorDescriptor_t *xDesc,
347
+ const void *x,
348
+ const cudnnTensorDescriptor_t hxDesc,
349
+ const void *hx,
350
+ const cudnnTensorDescriptor_t cxDesc,
351
+ const void *cx,
352
+ const cudnnFilterDescriptor_t wDesc,
353
+ const void *w,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ void *y,
356
+ const cudnnTensorDescriptor_t hyDesc,
357
+ void *hy,
358
+ const cudnnTensorDescriptor_t cyDesc,
359
+ void *cy,
360
+ void *workSpace,
361
+ size_t workSpaceSizeInBytes);
362
+
363
+ /* RNN EX API */
364
+
365
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
366
+ cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode);
367
+
368
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
369
+ cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode);
370
+
371
+ cudnnStatus_t CUDNNWINAPI
372
+ cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc);
373
+
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc);
376
+
377
+ cudnnStatus_t CUDNNWINAPI
378
+ cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
379
+ cudnnDataType_t dataType,
380
+ cudnnRNNDataLayout_t layout,
381
+ int maxSeqLength,
382
+ int batchSize,
383
+ int vectorSize,
384
+ const int seqLengthArray[], /* length of each sequence in the batch */
385
+ void *paddingFill); /* symbol for filling padding position in output */
386
+
387
+ cudnnStatus_t CUDNNWINAPI
388
+ cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc,
389
+ cudnnDataType_t *dataType,
390
+ cudnnRNNDataLayout_t *layout,
391
+ int *maxSeqLength,
392
+ int *batchSize,
393
+ int *vectorSize,
394
+ int arrayLengthRequested,
395
+ int seqLengthArray[],
396
+ void *paddingFill);
397
+
398
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
399
+ cudnnRNNForwardInferenceEx(cudnnHandle_t handle,
400
+ const cudnnRNNDescriptor_t rnnDesc,
401
+ const cudnnRNNDataDescriptor_t xDesc,
402
+ const void *x,
403
+ const cudnnTensorDescriptor_t hxDesc,
404
+ const void *hx,
405
+ const cudnnTensorDescriptor_t cxDesc,
406
+ const void *cx,
407
+ const cudnnFilterDescriptor_t wDesc,
408
+ const void *w,
409
+ const cudnnRNNDataDescriptor_t yDesc,
410
+ void *y,
411
+ const cudnnTensorDescriptor_t hyDesc,
412
+ void *hy,
413
+ const cudnnTensorDescriptor_t cyDesc,
414
+ void *cy,
415
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
416
+ const void *keys, /* reserved, should pass NULL */
417
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
418
+ void *cAttn, /* reserved, should pass NULL */
419
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
420
+ void *iAttn, /* reserved, should pass NULL */
421
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
422
+ void *queries, /* reserved, should pass NULL */
423
+ void *workSpace,
424
+ size_t workSpaceSizeInBytes);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnRNNForward(cudnnHandle_t handle,
428
+ cudnnRNNDescriptor_t rnnDesc,
429
+ cudnnForwardMode_t fwdMode,
430
+ const int32_t devSeqLengths[],
431
+ cudnnRNNDataDescriptor_t xDesc,
432
+ const void *x,
433
+ cudnnRNNDataDescriptor_t yDesc,
434
+ void *y,
435
+ cudnnTensorDescriptor_t hDesc,
436
+ const void *hx,
437
+ void *hy,
438
+ cudnnTensorDescriptor_t cDesc,
439
+ const void *cx,
440
+ void *cy,
441
+ size_t weightSpaceSize,
442
+ const void *weightSpace,
443
+ size_t workSpaceSize,
444
+ void *workSpace,
445
+ size_t reserveSpaceSize,
446
+ void *reserveSpace);
447
+
448
+ /* RNN FIND API */
449
+
450
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
451
+ cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc);
452
+
453
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
454
+ cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
455
+
456
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
457
+ cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle,
458
+ const cudnnRNNDescriptor_t rnnDesc,
459
+ const int seqLength,
460
+ const cudnnTensorDescriptor_t *xDesc,
461
+ const void *x,
462
+ const cudnnTensorDescriptor_t hxDesc,
463
+ const void *hx,
464
+ const cudnnTensorDescriptor_t cxDesc,
465
+ const void *cx,
466
+ const cudnnFilterDescriptor_t wDesc,
467
+ const void *w,
468
+ const cudnnTensorDescriptor_t *yDesc,
469
+ void *y,
470
+ const cudnnTensorDescriptor_t hyDesc,
471
+ void *hy,
472
+ const cudnnTensorDescriptor_t cyDesc,
473
+ void *cy,
474
+ const float findIntensity,
475
+ const int requestedAlgoCount,
476
+ int *returnedAlgoCount,
477
+ cudnnAlgorithmPerformance_t *perfResults,
478
+ void *workspace,
479
+ size_t workSpaceSizeInBytes);
480
+
481
+ /* Sequence data descriptor */
482
+
483
+ typedef enum {
484
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */
485
+ CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */
486
+ CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */
487
+ CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */
488
+ } cudnnSeqDataAxis_t;
489
+
490
+ struct cudnnSeqDataStruct;
491
+ typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t;
492
+
493
+ #define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */
494
+
495
+ cudnnStatus_t CUDNNWINAPI
496
+ cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc);
497
+
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc);
500
+
501
+ cudnnStatus_t CUDNNWINAPI
502
+ cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc,
503
+ cudnnDataType_t dataType,
504
+ int nbDims,
505
+ const int dimA[],
506
+ const cudnnSeqDataAxis_t axes[],
507
+ size_t seqLengthArraySize,
508
+ const int seqLengthArray[],
509
+ void *paddingFill);
510
+
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc,
513
+ cudnnDataType_t *dataType,
514
+ int *nbDims,
515
+ int nbDimsRequested,
516
+ int dimA[],
517
+ cudnnSeqDataAxis_t axes[],
518
+ size_t *seqLengthArraySize,
519
+ size_t seqLengthSizeRequested,
520
+ int seqLengthArray[],
521
+ void *paddingFill);
522
+
523
+ /* Multihead Attention */
524
+
525
+ /* Legacy type for backward compatibility */
526
+ typedef unsigned cudnnAttnQueryMap_t;
527
+
528
+ /*
529
+ * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor().
530
+ * Use the bitwise OR operator to combine several settings listed below. Additional
531
+ * minor options can be added here w/o changing or introducing new API functions.
532
+ */
533
+ #define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */
534
+ #define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */
535
+ #define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */
536
+ #define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */
537
+
538
+ struct cudnnAttnStruct;
539
+ typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t;
540
+
541
+ cudnnStatus_t CUDNNWINAPI
542
+ cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc);
543
+
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
549
+ unsigned attnMode,
550
+ int nHeads,
551
+ double smScaler,
552
+ cudnnDataType_t dataType,
553
+ cudnnDataType_t computePrec,
554
+ cudnnMathType_t mathType,
555
+ cudnnDropoutDescriptor_t attnDropoutDesc,
556
+ cudnnDropoutDescriptor_t postDropoutDesc,
557
+ int qSize,
558
+ int kSize,
559
+ int vSize,
560
+ int qProjSize,
561
+ int kProjSize,
562
+ int vProjSize,
563
+ int oProjSize,
564
+ int qoMaxSeqLength,
565
+ int kvMaxSeqLength,
566
+ int maxBatchSize,
567
+ int maxBeamSize);
568
+
569
+ cudnnStatus_t CUDNNWINAPI
570
+ cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc,
571
+ unsigned *attnMode,
572
+ int *nHeads,
573
+ double *smScaler,
574
+ cudnnDataType_t *dataType,
575
+ cudnnDataType_t *computePrec,
576
+ cudnnMathType_t *mathType,
577
+ cudnnDropoutDescriptor_t *attnDropoutDesc,
578
+ cudnnDropoutDescriptor_t *postDropoutDesc,
579
+ int *qSize,
580
+ int *kSize,
581
+ int *vSize,
582
+ int *qProjSize,
583
+ int *kProjSize,
584
+ int *vProjSize,
585
+ int *oProjSize,
586
+ int *qoMaxSeqLength,
587
+ int *kvMaxSeqLength,
588
+ int *maxBatchSize,
589
+ int *maxBeamSize);
590
+
591
+ cudnnStatus_t CUDNNWINAPI
592
+ cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle,
593
+ const cudnnAttnDescriptor_t attnDesc,
594
+ size_t *weightSizeInBytes,
595
+ size_t *workSpaceSizeInBytes,
596
+ size_t *reserveSpaceSizeInBytes);
597
+
598
+ typedef enum {
599
+ CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection weights for 'queries' */
600
+ CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */
601
+ CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */
602
+ CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */
603
+ CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */
604
+ CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */
605
+ CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */
606
+ CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */
607
+ } cudnnMultiHeadAttnWeightKind_t;
608
+
609
+ #define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */
610
+
611
+ cudnnStatus_t CUDNNWINAPI
612
+ cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle,
613
+ const cudnnAttnDescriptor_t attnDesc,
614
+ cudnnMultiHeadAttnWeightKind_t wKind,
615
+ size_t weightSizeInBytes,
616
+ const void *weights,
617
+ cudnnTensorDescriptor_t wDesc,
618
+ void **wAddr);
619
+
620
+ cudnnStatus_t CUDNNWINAPI
621
+ cudnnMultiHeadAttnForward(cudnnHandle_t handle,
622
+ const cudnnAttnDescriptor_t attnDesc,
623
+ int currIdx,
624
+ const int loWinIdx[],
625
+ const int hiWinIdx[],
626
+ const int devSeqLengthsQO[],
627
+ const int devSeqLengthsKV[],
628
+ const cudnnSeqDataDescriptor_t qDesc,
629
+ const void *queries,
630
+ const void *residuals,
631
+ const cudnnSeqDataDescriptor_t kDesc,
632
+ const void *keys,
633
+ const cudnnSeqDataDescriptor_t vDesc,
634
+ const void *values,
635
+ const cudnnSeqDataDescriptor_t oDesc,
636
+ void *out,
637
+ size_t weightSizeInBytes,
638
+ const void *weights,
639
+ size_t workSpaceSizeInBytes,
640
+ void *workSpace,
641
+ size_t reserveSpaceSizeInBytes,
642
+ void *reserveSpace);
643
+
644
+ /*
645
+ * \brief Cross-library version checker.
646
+ * This function is implemented differently in each sub-library. Each sublib
647
+ * checks whether its own version matches that of its dependencies.
648
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
649
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
650
+ */
651
+ cudnnStatus_t CUDNNWINAPI
652
+ cudnnAdvInferVersionCheck(void);
653
+
654
+ #if defined(__cplusplus)
655
+ }
656
+ #endif
657
+
658
+ #endif /* CUDNN_ADV_INFER_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h ADDED
@@ -0,0 +1,540 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_train : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_TRAIN_H_)
55
+ #define CUDNN_ADV_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+
65
+ /* These version numbers are autogenerated, do not edit manually. */
66
+ #define CUDNN_ADV_TRAIN_MAJOR 8
67
+ #define CUDNN_ADV_TRAIN_MINOR 9
68
+ #define CUDNN_ADV_TRAIN_PATCH 2
69
+
70
+ #if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \
71
+ (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL)
72
+ #error Version mismatch in cuDNN ADV TRAIN!!!
73
+ #endif
74
+
75
+ #if defined(__cplusplus)
76
+ extern "C" {
77
+ #endif
78
+
79
+ typedef enum {
80
+ CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */
81
+ CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */
82
+ } cudnnWgradMode_t;
83
+
84
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
85
+ cudnnRNNForwardTraining(cudnnHandle_t handle,
86
+ const cudnnRNNDescriptor_t rnnDesc,
87
+ const int seqLength,
88
+ const cudnnTensorDescriptor_t *xDesc,
89
+ const void *x,
90
+ const cudnnTensorDescriptor_t hxDesc,
91
+ const void *hx,
92
+ const cudnnTensorDescriptor_t cxDesc,
93
+ const void *cx,
94
+ const cudnnFilterDescriptor_t wDesc,
95
+ const void *w,
96
+ const cudnnTensorDescriptor_t *yDesc,
97
+ void *y,
98
+ const cudnnTensorDescriptor_t hyDesc,
99
+ void *hy,
100
+ const cudnnTensorDescriptor_t cyDesc,
101
+ void *cy,
102
+ void *workSpace,
103
+ size_t workSpaceSizeInBytes,
104
+ void *reserveSpace,
105
+ size_t reserveSpaceSizeInBytes);
106
+
107
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
108
+ cudnnRNNBackwardData(cudnnHandle_t handle,
109
+ const cudnnRNNDescriptor_t rnnDesc,
110
+ const int seqLength,
111
+ const cudnnTensorDescriptor_t *yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t *dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t dhyDesc,
116
+ const void *dhy,
117
+ const cudnnTensorDescriptor_t dcyDesc,
118
+ const void *dcy,
119
+ const cudnnFilterDescriptor_t wDesc,
120
+ const void *w,
121
+ const cudnnTensorDescriptor_t hxDesc,
122
+ const void *hx,
123
+ const cudnnTensorDescriptor_t cxDesc,
124
+ const void *cx,
125
+ const cudnnTensorDescriptor_t *dxDesc,
126
+ void *dx,
127
+ const cudnnTensorDescriptor_t dhxDesc,
128
+ void *dhx,
129
+ const cudnnTensorDescriptor_t dcxDesc,
130
+ void *dcx,
131
+ void *workSpace,
132
+ size_t workSpaceSizeInBytes,
133
+ void *reserveSpace,
134
+ size_t reserveSpaceSizeInBytes);
135
+
136
+ cudnnStatus_t CUDNNWINAPI
137
+ cudnnRNNBackwardData_v8(cudnnHandle_t handle,
138
+ cudnnRNNDescriptor_t rnnDesc,
139
+ const int32_t devSeqLengths[],
140
+ cudnnRNNDataDescriptor_t yDesc,
141
+ const void *y,
142
+ const void *dy,
143
+ cudnnRNNDataDescriptor_t xDesc,
144
+ void *dx,
145
+ cudnnTensorDescriptor_t hDesc,
146
+ const void *hx,
147
+ const void *dhy,
148
+ void *dhx,
149
+ cudnnTensorDescriptor_t cDesc,
150
+ const void *cx,
151
+ const void *dcy,
152
+ void *dcx,
153
+ size_t weightSpaceSize,
154
+ const void *weightSpace,
155
+ size_t workSpaceSize,
156
+ void *workSpace,
157
+ size_t reserveSpaceSize,
158
+ void *reserveSpace);
159
+
160
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
161
+ cudnnRNNBackwardWeights(cudnnHandle_t handle,
162
+ const cudnnRNNDescriptor_t rnnDesc,
163
+ const int seqLength,
164
+ const cudnnTensorDescriptor_t *xDesc,
165
+ const void *x,
166
+ const cudnnTensorDescriptor_t hxDesc,
167
+ const void *hx,
168
+ const cudnnTensorDescriptor_t *yDesc,
169
+ const void *y,
170
+ const void *workSpace,
171
+ size_t workSpaceSizeInBytes,
172
+ const cudnnFilterDescriptor_t dwDesc,
173
+ void *dw,
174
+ const void *reserveSpace,
175
+ size_t reserveSpaceSizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnRNNBackwardWeights_v8(cudnnHandle_t handle,
179
+ cudnnRNNDescriptor_t rnnDesc,
180
+ cudnnWgradMode_t addGrad,
181
+ const int32_t devSeqLengths[],
182
+ cudnnRNNDataDescriptor_t xDesc,
183
+ const void *x,
184
+ cudnnTensorDescriptor_t hDesc,
185
+ const void *hx,
186
+ cudnnRNNDataDescriptor_t yDesc,
187
+ const void *y,
188
+ size_t weightSpaceSize,
189
+ void *dweightSpace,
190
+ size_t workSpaceSize,
191
+ void *workSpace,
192
+ size_t reserveSpaceSize,
193
+ void *reserveSpace);
194
+
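For context only (not part of the header), one training step with the v8 API typically chains the forward call declared in cudnn_adv_infer.h with the two backward calls above; a condensed sketch follows, with every descriptor and device buffer assumed to be prepared and sized by the caller.

#include <cudnn.h>
#include <stdint.h>
#include <stddef.h>

/* Hedged sketch of one training step: forward in training mode, then data
 * gradients, then weight gradients accumulated into dweightSpace. NULL is
 * passed for optional hidden/cell state buffers and their gradients. */
static void rnn_training_step(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc,
                              const int32_t *devSeqLengths,
                              cudnnRNNDataDescriptor_t xDesc, const void *x, void *dx,
                              cudnnRNNDataDescriptor_t yDesc, void *y, const void *dy,
                              cudnnTensorDescriptor_t hDesc, cudnnTensorDescriptor_t cDesc,
                              size_t weightSpaceSize, const void *weightSpace, void *dweightSpace,
                              size_t workSpaceSize, void *workSpace,
                              size_t reserveSpaceSize, void *reserveSpace) {
    cudnnRNNForward(handle, rnnDesc, CUDNN_FWD_MODE_TRAINING, devSeqLengths,
                    xDesc, x, yDesc, y,
                    hDesc, NULL /* hx */, NULL /* hy */,
                    cDesc, NULL /* cx */, NULL /* cy */,
                    weightSpaceSize, weightSpace,
                    workSpaceSize, workSpace, reserveSpaceSize, reserveSpace);

    cudnnRNNBackwardData_v8(handle, rnnDesc, devSeqLengths,
                            yDesc, y, dy, xDesc, dx,
                            hDesc, NULL /* hx */, NULL /* dhy */, NULL /* dhx */,
                            cDesc, NULL /* cx */, NULL /* dcy */, NULL /* dcx */,
                            weightSpaceSize, weightSpace,
                            workSpaceSize, workSpace, reserveSpaceSize, reserveSpace);

    cudnnRNNBackwardWeights_v8(handle, rnnDesc, CUDNN_WGRAD_MODE_ADD, devSeqLengths,
                               xDesc, x, hDesc, NULL /* hx */,
                               yDesc, y,
                               weightSpaceSize, dweightSpace,
                               workSpaceSize, workSpace,
                               reserveSpaceSize, reserveSpace);
}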
195
+ /* RNN EX API */
196
+
197
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
198
+ cudnnRNNForwardTrainingEx(cudnnHandle_t handle,
199
+ const cudnnRNNDescriptor_t rnnDesc,
200
+ const cudnnRNNDataDescriptor_t xDesc,
201
+ const void *x,
202
+ const cudnnTensorDescriptor_t hxDesc,
203
+ const void *hx,
204
+ const cudnnTensorDescriptor_t cxDesc,
205
+ const void *cx,
206
+ const cudnnFilterDescriptor_t wDesc,
207
+ const void *w,
208
+ const cudnnRNNDataDescriptor_t yDesc,
209
+ void *y,
210
+ const cudnnTensorDescriptor_t hyDesc,
211
+ void *hy,
212
+ const cudnnTensorDescriptor_t cyDesc,
213
+ void *cy,
214
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
215
+ const void *keys, /* reserved, should pass NULL */
216
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
217
+ void *cAttn, /* reserved, should pass NULL */
218
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
219
+ void *iAttn, /* reserved, should pass NULL */
220
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
221
+ void *queries, /* reserved, should pass NULL */
222
+ void *workSpace,
223
+ size_t workSpaceSizeInBytes,
224
+ void *reserveSpace,
225
+ size_t reserveSpaceSizeInBytes);
226
+
227
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
228
+ cudnnRNNBackwardDataEx(cudnnHandle_t handle,
229
+ const cudnnRNNDescriptor_t rnnDesc,
230
+ const cudnnRNNDataDescriptor_t yDesc,
231
+ const void *y,
232
+ const cudnnRNNDataDescriptor_t dyDesc,
233
+ const void *dy,
234
+ const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */
235
+ const void *dcAttn, /* reserved, should pass NULL */
236
+ const cudnnTensorDescriptor_t dhyDesc,
237
+ const void *dhy,
238
+ const cudnnTensorDescriptor_t dcyDesc,
239
+ const void *dcy,
240
+ const cudnnFilterDescriptor_t wDesc,
241
+ const void *w,
242
+ const cudnnTensorDescriptor_t hxDesc,
243
+ const void *hx,
244
+ const cudnnTensorDescriptor_t cxDesc,
245
+ const void *cx,
246
+ const cudnnRNNDataDescriptor_t dxDesc,
247
+ void *dx,
248
+ const cudnnTensorDescriptor_t dhxDesc,
249
+ void *dhx,
250
+ const cudnnTensorDescriptor_t dcxDesc,
251
+ void *dcx,
252
+ const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */
253
+ void *dkeys, /* reserved, should pass NULL */
254
+ void *workSpace,
255
+ size_t workSpaceSizeInBytes,
256
+ void *reserveSpace,
257
+ size_t reserveSpaceSizeInBytes);
258
+
259
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
260
+ cudnnRNNBackwardWeightsEx(cudnnHandle_t handle,
261
+ const cudnnRNNDescriptor_t rnnDesc,
262
+ const cudnnRNNDataDescriptor_t xDesc,
263
+ const void *x,
264
+ const cudnnTensorDescriptor_t hxDesc,
265
+ const void *hx,
266
+ const cudnnRNNDataDescriptor_t yDesc,
267
+ const void *y,
268
+ void *workSpace,
269
+ size_t workSpaceSizeInBytes,
270
+ const cudnnFilterDescriptor_t dwDesc,
271
+ void *dw,
272
+ void *reserveSpace,
273
+ size_t reserveSpaceSizeInBytes);
274
+
275
+ /* RNN FIND API */
276
+
277
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
278
+ cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
279
+
280
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
281
+ cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle,
282
+ const cudnnRNNDescriptor_t rnnDesc,
283
+ const int seqLength,
284
+ const cudnnTensorDescriptor_t *xDesc,
285
+ const void *x,
286
+ const cudnnTensorDescriptor_t hxDesc,
287
+ const void *hx,
288
+ const cudnnTensorDescriptor_t cxDesc,
289
+ const void *cx,
290
+ const cudnnFilterDescriptor_t wDesc,
291
+ const void *w,
292
+ const cudnnTensorDescriptor_t *yDesc,
293
+ void *y,
294
+ const cudnnTensorDescriptor_t hyDesc,
295
+ void *hy,
296
+ const cudnnTensorDescriptor_t cyDesc,
297
+ void *cy,
298
+ const float findIntensity,
299
+ const int requestedAlgoCount,
300
+ int *returnedAlgoCount,
301
+ cudnnAlgorithmPerformance_t *perfResults,
302
+ void *workspace,
303
+ size_t workSpaceSizeInBytes,
304
+ void *reserveSpace,
305
+ size_t reserveSpaceSizeInBytes);
306
+
307
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
308
+ cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
309
+
310
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
311
+ cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle,
312
+ const cudnnRNNDescriptor_t rnnDesc,
313
+ const int seqLength,
314
+ const cudnnTensorDescriptor_t *yDesc,
315
+ const void *y,
316
+ const cudnnTensorDescriptor_t *dyDesc,
317
+ const void *dy,
318
+ const cudnnTensorDescriptor_t dhyDesc,
319
+ const void *dhy,
320
+ const cudnnTensorDescriptor_t dcyDesc,
321
+ const void *dcy,
322
+ const cudnnFilterDescriptor_t wDesc,
323
+ const void *w,
324
+ const cudnnTensorDescriptor_t hxDesc,
325
+ const void *hx,
326
+ const cudnnTensorDescriptor_t cxDesc,
327
+ const void *cx,
328
+ const cudnnTensorDescriptor_t *dxDesc,
329
+ void *dx,
330
+ const cudnnTensorDescriptor_t dhxDesc,
331
+ void *dhx,
332
+ const cudnnTensorDescriptor_t dcxDesc,
333
+ void *dcx,
334
+ const float findIntensity,
335
+ const int requestedAlgoCount,
336
+ int *returnedAlgoCount,
337
+ cudnnAlgorithmPerformance_t *perfResults,
338
+ void *workspace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
345
+
346
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
347
+ cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle,
348
+ const cudnnRNNDescriptor_t rnnDesc,
349
+ const int seqLength,
350
+ const cudnnTensorDescriptor_t *xDesc,
351
+ const void *x,
352
+ const cudnnTensorDescriptor_t hxDesc,
353
+ const void *hx,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ const void *y,
356
+ const float findIntensity,
357
+ const int requestedAlgoCount,
358
+ int *returnedAlgoCount,
359
+ cudnnAlgorithmPerformance_t *perfResults,
360
+ const void *workspace,
361
+ size_t workSpaceSizeInBytes,
362
+ const cudnnFilterDescriptor_t dwDesc,
363
+ void *dw,
364
+ const void *reserveSpace,
365
+ size_t reserveSpaceSizeInBytes);
366
+
367
+ cudnnStatus_t CUDNNWINAPI
368
+ cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle,
369
+ const cudnnAttnDescriptor_t attnDesc,
370
+ const int loWinIdx[],
371
+ const int hiWinIdx[],
372
+ const int devSeqLengthsDQDO[],
373
+ const int devSeqLengthsDKDV[],
374
+ const cudnnSeqDataDescriptor_t doDesc,
375
+ const void *dout,
376
+ const cudnnSeqDataDescriptor_t dqDesc,
377
+ void *dqueries,
378
+ const void *queries,
379
+ const cudnnSeqDataDescriptor_t dkDesc,
380
+ void *dkeys,
381
+ const void *keys,
382
+ const cudnnSeqDataDescriptor_t dvDesc,
383
+ void *dvalues,
384
+ const void *values,
385
+ size_t weightSizeInBytes,
386
+ const void *weights,
387
+ size_t workSpaceSizeInBytes,
388
+ void *workSpace,
389
+ size_t reserveSpaceSizeInBytes,
390
+ void *reserveSpace);
391
+
392
+ cudnnStatus_t CUDNNWINAPI
393
+ cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle,
394
+ const cudnnAttnDescriptor_t attnDesc,
395
+ cudnnWgradMode_t addGrad,
396
+ const cudnnSeqDataDescriptor_t qDesc,
397
+ const void *queries,
398
+ const cudnnSeqDataDescriptor_t kDesc,
399
+ const void *keys,
400
+ const cudnnSeqDataDescriptor_t vDesc,
401
+ const void *values,
402
+ const cudnnSeqDataDescriptor_t doDesc,
403
+ const void *dout,
404
+ size_t weightSizeInBytes,
405
+ const void *weights,
406
+ void *dweights,
407
+ size_t workSpaceSizeInBytes,
408
+ void *workSpace,
409
+ size_t reserveSpaceSizeInBytes,
410
+ void *reserveSpace);
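The two multi-head attention backward routines declared above are normally issued back to back during training: data gradients first, then weight gradients. A minimal hedged C sketch follows; it assumes the attention descriptor, the sequence-data descriptors, the device buffers, and the workspace/reserve-space allocations were already prepared during the forward pass, and it reuses each sequence-data descriptor for both a tensor and its gradient (a simplifying assumption, not something stated by this header). All names in the sketch are placeholders.

#include <cudnn.h>

/* Hedged sketch: every descriptor and buffer is assumed to exist already. */
static cudnnStatus_t backprop_attention(cudnnHandle_t handle,
                                        cudnnAttnDescriptor_t attnDesc,
                                        const int loWinIdx[], const int hiWinIdx[],
                                        const int devSeqLengthsDQDO[],
                                        const int devSeqLengthsDKDV[],
                                        cudnnSeqDataDescriptor_t qDesc, const void *q, void *dq,
                                        cudnnSeqDataDescriptor_t kDesc, const void *k, void *dk,
                                        cudnnSeqDataDescriptor_t vDesc, const void *v, void *dv,
                                        cudnnSeqDataDescriptor_t doDesc, const void *dout,
                                        size_t wSize, const void *w, void *dw,
                                        size_t wsSize, void *ws,
                                        size_t rsSize, void *rs)
{
    /* Data gradients first: dq/dk/dv are written here. The same seq-data
     * descriptors are assumed to describe both the tensors and their grads. */
    cudnnStatus_t st = cudnnMultiHeadAttnBackwardData(handle, attnDesc,
            loWinIdx, hiWinIdx, devSeqLengthsDQDO, devSeqLengthsDKDV,
            doDesc, dout, qDesc, dq, q, kDesc, dk, k, vDesc, dv, v,
            wSize, w, wsSize, ws, rsSize, rs);
    if (st != CUDNN_STATUS_SUCCESS) return st;

    /* Weight gradients second; CUDNN_WGRAD_MODE_SET overwrites dw rather
     * than accumulating into it. */
    return cudnnMultiHeadAttnBackwardWeights(handle, attnDesc, CUDNN_WGRAD_MODE_SET,
            qDesc, q, kDesc, k, vDesc, v, doDesc, dout,
            wSize, w, dw, wsSize, ws, rsSize, rs);
}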
411
+
412
+ /*
413
+ * CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions
414
+ */
415
+ /* Input normalization mode for loss function */
416
+ typedef enum {
417
+ CUDNN_LOSS_NORMALIZATION_NONE = 0,
418
+ CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1,
419
+ } cudnnLossNormalizationMode_t;
420
+
421
+ cudnnStatus_t CUDNNWINAPI
422
+ cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc);
423
+
424
+ cudnnStatus_t CUDNNWINAPI
425
+ cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType);
426
+
427
+ cudnnStatus_t CUDNNWINAPI
428
+ cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
429
+ cudnnDataType_t compType,
430
+ cudnnLossNormalizationMode_t normMode,
431
+ cudnnNanPropagation_t gradMode);
432
+
433
+ cudnnStatus_t CUDNNWINAPI
434
+ cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
435
+ cudnnDataType_t compType,
436
+ cudnnLossNormalizationMode_t normMode,
437
+ cudnnNanPropagation_t gradMode,
438
+ int maxLabelLength);
439
+
440
+ cudnnStatus_t CUDNNWINAPI
441
+ cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType);
442
+
443
+ cudnnStatus_t CUDNNWINAPI
444
+ cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
445
+ cudnnDataType_t *compType,
446
+ cudnnLossNormalizationMode_t *normMode,
447
+ cudnnNanPropagation_t *gradMode);
448
+
449
+ cudnnStatus_t CUDNNWINAPI
450
+ cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
451
+ cudnnDataType_t *compType,
452
+ cudnnLossNormalizationMode_t *normMode,
453
+ cudnnNanPropagation_t *gradMode,
454
+ int *maxLabelLength);
455
+
456
+ cudnnStatus_t CUDNNWINAPI
457
+ cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc);
458
+
459
+ /* return the ctc costs and gradients, given the probabilities and labels */
460
+ cudnnStatus_t CUDNNWINAPI
461
+ cudnnCTCLoss(
462
+ cudnnHandle_t handle,
463
+ const cudnnTensorDescriptor_t
464
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
465
+ mini batch size, A is the alphabet size) */
466
+ const void *probs, /* probabilities after softmax, in GPU memory */
467
+ const int hostLabels[], /* labels, in CPU memory */
468
+ const int hostLabelLengths[], /* the length of each label, in CPU memory */
469
+ const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */
470
+ void *costs, /* the returned costs of CTC, in GPU memory */
471
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
472
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
473
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
474
+ cudnnCTCLossDescriptor_t ctcLossDesc,
475
+ void *workspace, /* pointer to the workspace, in GPU memory */
476
+ size_t workSpaceSizeInBytes); /* size of the workspace */
477
+
478
+ /* return the ctc costs and gradients, given the probabilities and labels */
479
+ cudnnStatus_t CUDNNWINAPI
480
+ cudnnCTCLoss_v8(
481
+ cudnnHandle_t handle,
482
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
483
+ cudnnCTCLossDescriptor_t ctcLossDesc,
484
+ const cudnnTensorDescriptor_t
485
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
486
+ mini batch size, A is the alphabet size) */
487
+ const void *probs, /* probabilities after softmax, in GPU memory */
488
+ const int labels[], /* labels, in GPU memory */
489
+ const int labelLengths[], /* the length of each label, in GPU memory */
490
+ const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */
491
+ void *costs, /* the returned costs of CTC, in GPU memory */
492
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
493
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
494
+ size_t workSpaceSizeInBytes, /* size of the workspace */
495
+ void *workspace); /* pointer to the workspace, in GPU memory */
496
+
497
+ /* return the workspace size needed for ctc */
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnGetCTCLossWorkspaceSize(
500
+ cudnnHandle_t handle,
501
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
502
+ timing steps, N is the mini batch size, A is the alphabet size) */
503
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
504
+ dimensions are T,N,A. To compute costs
505
+ only, set it to NULL */
506
+ const int *labels, /* labels, in CPU memory */
507
+ const int *labelLengths, /* the length of each label, in CPU memory */
508
+ const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */
509
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
510
+ cudnnCTCLossDescriptor_t ctcLossDesc,
511
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
512
+
513
+ /* return the workspace size needed for ctc */
514
+ cudnnStatus_t CUDNNWINAPI
515
+ cudnnGetCTCLossWorkspaceSize_v8(
516
+ cudnnHandle_t handle,
517
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
518
+ cudnnCTCLossDescriptor_t ctcLossDesc,
519
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
520
+ timing steps, N is the mini batch size, A is the alphabet size) */
521
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
522
+ dimensions are T,N,A. To compute costs
523
+ only, set it to NULL */
524
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
525
+
526
+ /*
527
+ * \brief Cross-library version checker.
528
+ * This function is implemented differently in each sub-library. Each sublib
529
+ * checks whether its own version matches that of its dependencies.
530
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
531
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
532
+ */
533
+ cudnnStatus_t CUDNNWINAPI
534
+ cudnnAdvTrainVersionCheck(void);
535
+
536
+ #if defined(__cplusplus)
537
+ }
538
+ #endif
539
+
540
+ #endif /* CUDNN_ADV_TRAIN_H_ */
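Taken together, the CTC entry points in this header follow the usual cuDNN pattern: build a loss descriptor, query the workspace size, then compute costs and gradients. Below is a minimal hedged C sketch of that flow using the declarations above; the tensor descriptors, device buffers, and host label arrays are assumed to be prepared by the caller, the algorithm choice is only an illustration, and most error checking is elided.

#include <cuda_runtime.h>
#include <cudnn.h>

/* Hedged sketch: probsDesc/gradsDesc, the device buffers and the host label
 * arrays are assumed to exist already. */
static cudnnStatus_t ctc_loss_step(cudnnHandle_t handle,
                                   cudnnTensorDescriptor_t probsDesc, const void *probs,
                                   cudnnTensorDescriptor_t gradsDesc, void *grads,
                                   const int *hostLabels, const int *hostLabelLengths,
                                   const int *hostInputLengths, void *costs)
{
    cudnnCTCLossDescriptor_t ctcDesc;
    cudnnStatus_t st = cudnnCreateCTCLossDescriptor(&ctcDesc);
    if (st != CUDNN_STATUS_SUCCESS) return st;
    cudnnSetCTCLossDescriptor(ctcDesc, CUDNN_DATA_FLOAT);

    /* Ask cuDNN how much scratch memory this configuration needs. */
    size_t wsSize = 0;
    cudnnGetCTCLossWorkspaceSize(handle, probsDesc, gradsDesc,
                                 hostLabels, hostLabelLengths, hostInputLengths,
                                 CUDNN_CTC_LOSS_ALGO_DETERMINISTIC, ctcDesc, &wsSize);

    void *ws = NULL;
    if (wsSize > 0) cudaMalloc(&ws, wsSize);

    /* Costs and gradients are written to GPU memory. */
    st = cudnnCTCLoss(handle, probsDesc, probs,
                      hostLabels, hostLabelLengths, hostInputLengths,
                      costs, gradsDesc, grads,
                      CUDNN_CTC_LOSS_ALGO_DETERMINISTIC, ctcDesc, ws, wsSize);

    cudaFree(ws);
    cudnnDestroyCTCLossDescriptor(ctcDesc);
    return st;
}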
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h ADDED
@@ -0,0 +1,540 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /* cudnn_adv_train : cuDNN's advanced and experimental features.
51
+
52
+ */
53
+
54
+ #if !defined(CUDNN_ADV_TRAIN_H_)
55
+ #define CUDNN_ADV_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+ #include "cudnn_ops_train.h"
63
+ #include "cudnn_adv_infer.h"
64
+
65
+ /* These version numbers are autogenerated, do not edit manually. */
66
+ #define CUDNN_ADV_TRAIN_MAJOR 8
67
+ #define CUDNN_ADV_TRAIN_MINOR 9
68
+ #define CUDNN_ADV_TRAIN_PATCH 2
69
+
70
+ #if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \
71
+ (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL)
72
+ #error Version mismatch in cuDNN ADV TRAIN!!!
73
+ #endif
74
+
75
+ #if defined(__cplusplus)
76
+ extern "C" {
77
+ #endif
78
+
79
+ typedef enum {
80
+ CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */
81
+ CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */
82
+ } cudnnWgradMode_t;
83
+
84
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
85
+ cudnnRNNForwardTraining(cudnnHandle_t handle,
86
+ const cudnnRNNDescriptor_t rnnDesc,
87
+ const int seqLength,
88
+ const cudnnTensorDescriptor_t *xDesc,
89
+ const void *x,
90
+ const cudnnTensorDescriptor_t hxDesc,
91
+ const void *hx,
92
+ const cudnnTensorDescriptor_t cxDesc,
93
+ const void *cx,
94
+ const cudnnFilterDescriptor_t wDesc,
95
+ const void *w,
96
+ const cudnnTensorDescriptor_t *yDesc,
97
+ void *y,
98
+ const cudnnTensorDescriptor_t hyDesc,
99
+ void *hy,
100
+ const cudnnTensorDescriptor_t cyDesc,
101
+ void *cy,
102
+ void *workSpace,
103
+ size_t workSpaceSizeInBytes,
104
+ void *reserveSpace,
105
+ size_t reserveSpaceSizeInBytes);
106
+
107
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
108
+ cudnnRNNBackwardData(cudnnHandle_t handle,
109
+ const cudnnRNNDescriptor_t rnnDesc,
110
+ const int seqLength,
111
+ const cudnnTensorDescriptor_t *yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t *dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t dhyDesc,
116
+ const void *dhy,
117
+ const cudnnTensorDescriptor_t dcyDesc,
118
+ const void *dcy,
119
+ const cudnnFilterDescriptor_t wDesc,
120
+ const void *w,
121
+ const cudnnTensorDescriptor_t hxDesc,
122
+ const void *hx,
123
+ const cudnnTensorDescriptor_t cxDesc,
124
+ const void *cx,
125
+ const cudnnTensorDescriptor_t *dxDesc,
126
+ void *dx,
127
+ const cudnnTensorDescriptor_t dhxDesc,
128
+ void *dhx,
129
+ const cudnnTensorDescriptor_t dcxDesc,
130
+ void *dcx,
131
+ void *workSpace,
132
+ size_t workSpaceSizeInBytes,
133
+ void *reserveSpace,
134
+ size_t reserveSpaceSizeInBytes);
135
+
136
+ cudnnStatus_t CUDNNWINAPI
137
+ cudnnRNNBackwardData_v8(cudnnHandle_t handle,
138
+ cudnnRNNDescriptor_t rnnDesc,
139
+ const int32_t devSeqLengths[],
140
+ cudnnRNNDataDescriptor_t yDesc,
141
+ const void *y,
142
+ const void *dy,
143
+ cudnnRNNDataDescriptor_t xDesc,
144
+ void *dx,
145
+ cudnnTensorDescriptor_t hDesc,
146
+ const void *hx,
147
+ const void *dhy,
148
+ void *dhx,
149
+ cudnnTensorDescriptor_t cDesc,
150
+ const void *cx,
151
+ const void *dcy,
152
+ void *dcx,
153
+ size_t weightSpaceSize,
154
+ const void *weightSpace,
155
+ size_t workSpaceSize,
156
+ void *workSpace,
157
+ size_t reserveSpaceSize,
158
+ void *reserveSpace);
159
+
160
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
161
+ cudnnRNNBackwardWeights(cudnnHandle_t handle,
162
+ const cudnnRNNDescriptor_t rnnDesc,
163
+ const int seqLength,
164
+ const cudnnTensorDescriptor_t *xDesc,
165
+ const void *x,
166
+ const cudnnTensorDescriptor_t hxDesc,
167
+ const void *hx,
168
+ const cudnnTensorDescriptor_t *yDesc,
169
+ const void *y,
170
+ const void *workSpace,
171
+ size_t workSpaceSizeInBytes,
172
+ const cudnnFilterDescriptor_t dwDesc,
173
+ void *dw,
174
+ const void *reserveSpace,
175
+ size_t reserveSpaceSizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnRNNBackwardWeights_v8(cudnnHandle_t handle,
179
+ cudnnRNNDescriptor_t rnnDesc,
180
+ cudnnWgradMode_t addGrad,
181
+ const int32_t devSeqLengths[],
182
+ cudnnRNNDataDescriptor_t xDesc,
183
+ const void *x,
184
+ cudnnTensorDescriptor_t hDesc,
185
+ const void *hx,
186
+ cudnnRNNDataDescriptor_t yDesc,
187
+ const void *y,
188
+ size_t weightSpaceSize,
189
+ void *dweightSpace,
190
+ size_t workSpaceSize,
191
+ void *workSpace,
192
+ size_t reserveSpaceSize,
193
+ void *reserveSpace);
194
+
195
+ /* RNN EX API */
196
+
197
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
198
+ cudnnRNNForwardTrainingEx(cudnnHandle_t handle,
199
+ const cudnnRNNDescriptor_t rnnDesc,
200
+ const cudnnRNNDataDescriptor_t xDesc,
201
+ const void *x,
202
+ const cudnnTensorDescriptor_t hxDesc,
203
+ const void *hx,
204
+ const cudnnTensorDescriptor_t cxDesc,
205
+ const void *cx,
206
+ const cudnnFilterDescriptor_t wDesc,
207
+ const void *w,
208
+ const cudnnRNNDataDescriptor_t yDesc,
209
+ void *y,
210
+ const cudnnTensorDescriptor_t hyDesc,
211
+ void *hy,
212
+ const cudnnTensorDescriptor_t cyDesc,
213
+ void *cy,
214
+ const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */
215
+ const void *keys, /* reserved, should pass NULL */
216
+ const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */
217
+ void *cAttn, /* reserved, should pass NULL */
218
+ const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */
219
+ void *iAttn, /* reserved, should pass NULL */
220
+ const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */
221
+ void *queries, /* reserved, should pass NULL */
222
+ void *workSpace,
223
+ size_t workSpaceSizeInBytes,
224
+ void *reserveSpace,
225
+ size_t reserveSpaceSizeInBytes);
226
+
227
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
228
+ cudnnRNNBackwardDataEx(cudnnHandle_t handle,
229
+ const cudnnRNNDescriptor_t rnnDesc,
230
+ const cudnnRNNDataDescriptor_t yDesc,
231
+ const void *y,
232
+ const cudnnRNNDataDescriptor_t dyDesc,
233
+ const void *dy,
234
+ const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */
235
+ const void *dcAttn, /* reserved, should pass NULL */
236
+ const cudnnTensorDescriptor_t dhyDesc,
237
+ const void *dhy,
238
+ const cudnnTensorDescriptor_t dcyDesc,
239
+ const void *dcy,
240
+ const cudnnFilterDescriptor_t wDesc,
241
+ const void *w,
242
+ const cudnnTensorDescriptor_t hxDesc,
243
+ const void *hx,
244
+ const cudnnTensorDescriptor_t cxDesc,
245
+ const void *cx,
246
+ const cudnnRNNDataDescriptor_t dxDesc,
247
+ void *dx,
248
+ const cudnnTensorDescriptor_t dhxDesc,
249
+ void *dhx,
250
+ const cudnnTensorDescriptor_t dcxDesc,
251
+ void *dcx,
252
+ const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */
253
+ void *dkeys, /* reserved, should pass NULL */
254
+ void *workSpace,
255
+ size_t workSpaceSizeInBytes,
256
+ void *reserveSpace,
257
+ size_t reserveSpaceSizeInBytes);
258
+
259
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
260
+ cudnnRNNBackwardWeightsEx(cudnnHandle_t handle,
261
+ const cudnnRNNDescriptor_t rnnDesc,
262
+ const cudnnRNNDataDescriptor_t xDesc,
263
+ const void *x,
264
+ const cudnnTensorDescriptor_t hxDesc,
265
+ const void *hx,
266
+ const cudnnRNNDataDescriptor_t yDesc,
267
+ const void *y,
268
+ void *workSpace,
269
+ size_t workSpaceSizeInBytes,
270
+ const cudnnFilterDescriptor_t dwDesc,
271
+ void *dw,
272
+ void *reserveSpace,
273
+ size_t reserveSpaceSizeInBytes);
274
+
275
+ /* RNN FIND API */
276
+
277
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
278
+ cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
279
+
280
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
281
+ cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle,
282
+ const cudnnRNNDescriptor_t rnnDesc,
283
+ const int seqLength,
284
+ const cudnnTensorDescriptor_t *xDesc,
285
+ const void *x,
286
+ const cudnnTensorDescriptor_t hxDesc,
287
+ const void *hx,
288
+ const cudnnTensorDescriptor_t cxDesc,
289
+ const void *cx,
290
+ const cudnnFilterDescriptor_t wDesc,
291
+ const void *w,
292
+ const cudnnTensorDescriptor_t *yDesc,
293
+ void *y,
294
+ const cudnnTensorDescriptor_t hyDesc,
295
+ void *hy,
296
+ const cudnnTensorDescriptor_t cyDesc,
297
+ void *cy,
298
+ const float findIntensity,
299
+ const int requestedAlgoCount,
300
+ int *returnedAlgoCount,
301
+ cudnnAlgorithmPerformance_t *perfResults,
302
+ void *workspace,
303
+ size_t workSpaceSizeInBytes,
304
+ void *reserveSpace,
305
+ size_t reserveSpaceSizeInBytes);
306
+
307
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
308
+ cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
309
+
310
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
311
+ cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle,
312
+ const cudnnRNNDescriptor_t rnnDesc,
313
+ const int seqLength,
314
+ const cudnnTensorDescriptor_t *yDesc,
315
+ const void *y,
316
+ const cudnnTensorDescriptor_t *dyDesc,
317
+ const void *dy,
318
+ const cudnnTensorDescriptor_t dhyDesc,
319
+ const void *dhy,
320
+ const cudnnTensorDescriptor_t dcyDesc,
321
+ const void *dcy,
322
+ const cudnnFilterDescriptor_t wDesc,
323
+ const void *w,
324
+ const cudnnTensorDescriptor_t hxDesc,
325
+ const void *hx,
326
+ const cudnnTensorDescriptor_t cxDesc,
327
+ const void *cx,
328
+ const cudnnTensorDescriptor_t *dxDesc,
329
+ void *dx,
330
+ const cudnnTensorDescriptor_t dhxDesc,
331
+ void *dhx,
332
+ const cudnnTensorDescriptor_t dcxDesc,
333
+ void *dcx,
334
+ const float findIntensity,
335
+ const int requestedAlgoCount,
336
+ int *returnedAlgoCount,
337
+ cudnnAlgorithmPerformance_t *perfResults,
338
+ void *workspace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count);
345
+
346
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
347
+ cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle,
348
+ const cudnnRNNDescriptor_t rnnDesc,
349
+ const int seqLength,
350
+ const cudnnTensorDescriptor_t *xDesc,
351
+ const void *x,
352
+ const cudnnTensorDescriptor_t hxDesc,
353
+ const void *hx,
354
+ const cudnnTensorDescriptor_t *yDesc,
355
+ const void *y,
356
+ const float findIntensity,
357
+ const int requestedAlgoCount,
358
+ int *returnedAlgoCount,
359
+ cudnnAlgorithmPerformance_t *perfResults,
360
+ const void *workspace,
361
+ size_t workSpaceSizeInBytes,
362
+ const cudnnFilterDescriptor_t dwDesc,
363
+ void *dw,
364
+ const void *reserveSpace,
365
+ size_t reserveSpaceSizeInBytes);
366
+
367
+ cudnnStatus_t CUDNNWINAPI
368
+ cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle,
369
+ const cudnnAttnDescriptor_t attnDesc,
370
+ const int loWinIdx[],
371
+ const int hiWinIdx[],
372
+ const int devSeqLengthsDQDO[],
373
+ const int devSeqLengthsDKDV[],
374
+ const cudnnSeqDataDescriptor_t doDesc,
375
+ const void *dout,
376
+ const cudnnSeqDataDescriptor_t dqDesc,
377
+ void *dqueries,
378
+ const void *queries,
379
+ const cudnnSeqDataDescriptor_t dkDesc,
380
+ void *dkeys,
381
+ const void *keys,
382
+ const cudnnSeqDataDescriptor_t dvDesc,
383
+ void *dvalues,
384
+ const void *values,
385
+ size_t weightSizeInBytes,
386
+ const void *weights,
387
+ size_t workSpaceSizeInBytes,
388
+ void *workSpace,
389
+ size_t reserveSpaceSizeInBytes,
390
+ void *reserveSpace);
391
+
392
+ cudnnStatus_t CUDNNWINAPI
393
+ cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle,
394
+ const cudnnAttnDescriptor_t attnDesc,
395
+ cudnnWgradMode_t addGrad,
396
+ const cudnnSeqDataDescriptor_t qDesc,
397
+ const void *queries,
398
+ const cudnnSeqDataDescriptor_t kDesc,
399
+ const void *keys,
400
+ const cudnnSeqDataDescriptor_t vDesc,
401
+ const void *values,
402
+ const cudnnSeqDataDescriptor_t doDesc,
403
+ const void *dout,
404
+ size_t weightSizeInBytes,
405
+ const void *weights,
406
+ void *dweights,
407
+ size_t workSpaceSizeInBytes,
408
+ void *workSpace,
409
+ size_t reserveSpaceSizeInBytes,
410
+ void *reserveSpace);
411
+
412
+ /*
413
+ * CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions
414
+ */
415
+ /* Input normalization mode for loss function */
416
+ typedef enum {
417
+ CUDNN_LOSS_NORMALIZATION_NONE = 0,
418
+ CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1,
419
+ } cudnnLossNormalizationMode_t;
420
+
421
+ cudnnStatus_t CUDNNWINAPI
422
+ cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc);
423
+
424
+ cudnnStatus_t CUDNNWINAPI
425
+ cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType);
426
+
427
+ cudnnStatus_t CUDNNWINAPI
428
+ cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
429
+ cudnnDataType_t compType,
430
+ cudnnLossNormalizationMode_t normMode,
431
+ cudnnNanPropagation_t gradMode);
432
+
433
+ cudnnStatus_t CUDNNWINAPI
434
+ cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
435
+ cudnnDataType_t compType,
436
+ cudnnLossNormalizationMode_t normMode,
437
+ cudnnNanPropagation_t gradMode,
438
+ int maxLabelLength);
439
+
440
+ cudnnStatus_t CUDNNWINAPI
441
+ cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType);
442
+
443
+ cudnnStatus_t CUDNNWINAPI
444
+ cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc,
445
+ cudnnDataType_t *compType,
446
+ cudnnLossNormalizationMode_t *normMode,
447
+ cudnnNanPropagation_t *gradMode);
448
+
449
+ cudnnStatus_t CUDNNWINAPI
450
+ cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc,
451
+ cudnnDataType_t *compType,
452
+ cudnnLossNormalizationMode_t *normMode,
453
+ cudnnNanPropagation_t *gradMode,
454
+ int *maxLabelLength);
455
+
456
+ cudnnStatus_t CUDNNWINAPI
457
+ cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc);
458
+
459
+ /* return the ctc costs and gradients, given the probabilities and labels */
460
+ cudnnStatus_t CUDNNWINAPI
461
+ cudnnCTCLoss(
462
+ cudnnHandle_t handle,
463
+ const cudnnTensorDescriptor_t
464
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
465
+ mini batch size, A is the alphabet size) */
466
+ const void *probs, /* probabilities after softmax, in GPU memory */
467
+ const int hostLabels[], /* labels, in CPU memory */
468
+ const int hostLabelLengths[], /* the length of each label, in CPU memory */
469
+ const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */
470
+ void *costs, /* the returned costs of CTC, in GPU memory */
471
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
472
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
473
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
474
+ cudnnCTCLossDescriptor_t ctcLossDesc,
475
+ void *workspace, /* pointer to the workspace, in GPU memory */
476
+ size_t workSpaceSizeInBytes); /* size of the workspace */
477
+
478
+ /* return the ctc costs and gradients, given the probabilities and labels */
479
+ cudnnStatus_t CUDNNWINAPI
480
+ cudnnCTCLoss_v8(
481
+ cudnnHandle_t handle,
482
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
483
+ cudnnCTCLossDescriptor_t ctcLossDesc,
484
+ const cudnnTensorDescriptor_t
485
+ probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the
486
+ mini batch size, A is the alphabet size) */
487
+ const void *probs, /* probabilities after softmax, in GPU memory */
488
+ const int labels[], /* labels, in GPU memory */
489
+ const int labelLengths[], /* the length of each label, in GPU memory */
490
+ const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */
491
+ void *costs, /* the returned costs of CTC, in GPU memory */
492
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */
493
+ void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */
494
+ size_t workSpaceSizeInBytes, /* size of the workspace */
495
+ void *workspace); /* pointer to the workspace, in GPU memory */
496
+
497
+ /* return the workspace size needed for ctc */
498
+ cudnnStatus_t CUDNNWINAPI
499
+ cudnnGetCTCLossWorkspaceSize(
500
+ cudnnHandle_t handle,
501
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
502
+ timing steps, N is the mini batch size, A is the alphabet size) */
503
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
504
+ dimensions are T,N,A. To compute costs
505
+ only, set it to NULL */
506
+ const int *labels, /* labels, in CPU memory */
507
+ const int *labelLengths, /* the length of each label, in CPU memory */
508
+ const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */
509
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
510
+ cudnnCTCLossDescriptor_t ctcLossDesc,
511
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
512
+
513
+ /* return the workspace size needed for ctc */
514
+ cudnnStatus_t CUDNNWINAPI
515
+ cudnnGetCTCLossWorkspaceSize_v8(
516
+ cudnnHandle_t handle,
517
+ cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */
518
+ cudnnCTCLossDescriptor_t ctcLossDesc,
519
+ const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the
520
+ timing steps, N is the mini batch size, A is the alphabet size) */
521
+ const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the
522
+ dimensions are T,N,A. To compute costs
523
+ only, set it to NULL */
524
+ size_t *sizeInBytes); /* pointer to the returned workspace size */
525
+
526
+ /*
527
+ * \brief Cross-library version checker.
528
+ * This function is implemented differently in each sub-library. Each sublib
529
+ * checks whether its own version matches that of its dependencies.
530
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
531
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
532
+ */
533
+ cudnnStatus_t CUDNNWINAPI
534
+ cudnnAdvTrainVersionCheck(void);
535
+
536
+ #if defined(__cplusplus)
537
+ }
538
+ #endif
539
+
540
+ #endif /* CUDNN_ADV_TRAIN_H_ */
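cudnn_adv_train_v8.h carries the same declarations as cudnn_adv_train.h. As an illustration of the descriptor-style setters it declares, here is a small hedged C sketch of the _v8 CTC loss descriptor round trip; the chosen modes and the 64-label limit are arbitrary example values, not anything required by the header.

#include <cudnn.h>

/* Hedged sketch of the _v8 descriptor set/get round trip. */
static void ctc_descriptor_demo(void)
{
    cudnnCTCLossDescriptor_t desc;
    cudnnCreateCTCLossDescriptor(&desc);

    /* Configure: fp32 compute, softmax normalization applied by cuDNN,
     * NaN gradients not propagated, labels no longer than 64 tokens. */
    cudnnSetCTCLossDescriptor_v8(desc, CUDNN_DATA_FLOAT,
                                 CUDNN_LOSS_NORMALIZATION_SOFTMAX,
                                 CUDNN_NOT_PROPAGATE_NAN, 64);

    /* Read the settings back through the matching getter. */
    cudnnDataType_t compType;
    cudnnLossNormalizationMode_t normMode;
    cudnnNanPropagation_t gradMode;
    int maxLabelLength;
    cudnnGetCTCLossDescriptor_v8(desc, &compType, &normMode, &gradMode, &maxLabelLength);

    cudnnDestroyCTCLossDescriptor(desc);
}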
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h ADDED
@@ -0,0 +1,608 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDNN_BACKEND_H_
51
+ #define _CUDNN_BACKEND_H_
52
+
53
+ /*
54
+ * The content in this header file is under development to be included in cudnn.h in the future
55
+ * Production code should have all include of this header file remove.
56
+ */
57
+
58
+ #include "cudnn_ops_infer.h"
59
+ #include "cudnn_cnn_infer.h"
60
+
61
+ /* NOTE: definition in extern "C" to be copied later to public header */
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif
65
+
66
+ typedef void *cudnnBackendDescriptor_t;
67
+
68
+ typedef struct cudnnFractionStruct {
69
+ int64_t numerator;
70
+ int64_t denominator;
71
+ } cudnnFraction_t;
72
+
73
+ typedef enum {
74
+ CUDNN_POINTWISE_ADD = 0,
75
+ CUDNN_POINTWISE_ADD_SQUARE = 5,
76
+ CUDNN_POINTWISE_DIV = 6,
77
+ CUDNN_POINTWISE_MAX = 3,
78
+ CUDNN_POINTWISE_MIN = 2,
79
+ CUDNN_POINTWISE_MOD = 7,
80
+ CUDNN_POINTWISE_MUL = 1,
81
+ CUDNN_POINTWISE_POW = 8,
82
+ CUDNN_POINTWISE_SUB = 9,
83
+
84
+ CUDNN_POINTWISE_ABS = 10,
85
+ CUDNN_POINTWISE_CEIL = 11,
86
+ CUDNN_POINTWISE_COS = 12,
87
+ CUDNN_POINTWISE_EXP = 13,
88
+ CUDNN_POINTWISE_FLOOR = 14,
89
+ CUDNN_POINTWISE_LOG = 15,
90
+ CUDNN_POINTWISE_NEG = 16,
91
+ CUDNN_POINTWISE_RSQRT = 17,
92
+ CUDNN_POINTWISE_SIN = 18,
93
+ CUDNN_POINTWISE_SQRT = 4,
94
+ CUDNN_POINTWISE_TAN = 19,
95
+ CUDNN_POINTWISE_ERF = 20,
96
+ CUDNN_POINTWISE_IDENTITY = 21,
97
+ CUDNN_POINTWISE_RECIPROCAL = 22,
98
+
99
+ CUDNN_POINTWISE_RELU_FWD = 100,
100
+ CUDNN_POINTWISE_TANH_FWD = 101,
101
+ CUDNN_POINTWISE_SIGMOID_FWD = 102,
102
+ CUDNN_POINTWISE_ELU_FWD = 103,
103
+ CUDNN_POINTWISE_GELU_FWD = 104,
104
+ CUDNN_POINTWISE_SOFTPLUS_FWD = 105,
105
+ CUDNN_POINTWISE_SWISH_FWD = 106,
106
+ CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107,
107
+
108
+ CUDNN_POINTWISE_RELU_BWD = 200,
109
+ CUDNN_POINTWISE_TANH_BWD = 201,
110
+ CUDNN_POINTWISE_SIGMOID_BWD = 202,
111
+ CUDNN_POINTWISE_ELU_BWD = 203,
112
+ CUDNN_POINTWISE_GELU_BWD = 204,
113
+ CUDNN_POINTWISE_SOFTPLUS_BWD = 205,
114
+ CUDNN_POINTWISE_SWISH_BWD = 206,
115
+ CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207,
116
+
117
+ CUDNN_POINTWISE_CMP_EQ = 300,
118
+ CUDNN_POINTWISE_CMP_NEQ = 301,
119
+ CUDNN_POINTWISE_CMP_GT = 302,
120
+ CUDNN_POINTWISE_CMP_GE = 303,
121
+ CUDNN_POINTWISE_CMP_LT = 304,
122
+ CUDNN_POINTWISE_CMP_LE = 305,
123
+
124
+ CUDNN_POINTWISE_LOGICAL_AND = 400,
125
+ CUDNN_POINTWISE_LOGICAL_OR = 401,
126
+ CUDNN_POINTWISE_LOGICAL_NOT = 402,
127
+
128
+ CUDNN_POINTWISE_GEN_INDEX = 501,
129
+
130
+ CUDNN_POINTWISE_BINARY_SELECT = 601,
131
+ } cudnnPointwiseMode_t;
132
+
133
+ typedef enum {
134
+ CUDNN_RESAMPLE_NEAREST = 0,
135
+ CUDNN_RESAMPLE_BILINEAR = 1,
136
+ CUDNN_RESAMPLE_AVGPOOL = 2,
137
+ CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2,
138
+ CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4,
139
+ CUDNN_RESAMPLE_MAXPOOL = 3,
140
+ } cudnnResampleMode_t;
141
+
142
+ typedef enum {
143
+ CUDNN_SIGNAL_SET = 0,
144
+ CUDNN_SIGNAL_WAIT = 1,
145
+ } cudnnSignalMode_t;
146
+
147
+ typedef enum {
148
+ CUDNN_GENSTATS_SUM_SQSUM = 0,
149
+ } cudnnGenStatsMode_t;
150
+
151
+ typedef enum {
152
+ CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0,
153
+ CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1,
154
+ } cudnnBnFinalizeStatsMode_t;
155
+
156
+ typedef enum {
157
+ CUDNN_RNG_DISTRIBUTION_BERNOULLI,
158
+ CUDNN_RNG_DISTRIBUTION_UNIFORM,
159
+ CUDNN_RNG_DISTRIBUTION_NORMAL,
160
+ } cudnnRngDistribution_t;
161
+
162
+ typedef enum {
163
+ CUDNN_ATTR_POINTWISE_MODE = 0,
164
+ CUDNN_ATTR_POINTWISE_MATH_PREC = 1,
165
+ CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2,
166
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3,
167
+ CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4,
168
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5,
169
+ CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6,
170
+ CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7,
171
+ CUDNN_ATTR_POINTWISE_SWISH_BETA = 8,
172
+ CUDNN_ATTR_POINTWISE_AXIS = 9,
173
+
174
+ CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100,
175
+ CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101,
176
+ CUDNN_ATTR_CONVOLUTION_DILATIONS = 102,
177
+ CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103,
178
+ CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104,
179
+ CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105,
180
+ CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106,
181
+
182
+ CUDNN_ATTR_ENGINEHEUR_MODE = 200,
183
+ CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201,
184
+ CUDNN_ATTR_ENGINEHEUR_RESULTS = 202,
185
+
186
+ CUDNN_ATTR_ENGINECFG_ENGINE = 300,
187
+ CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301,
188
+ CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302,
189
+
190
+ CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400,
191
+ CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401,
192
+ CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402,
193
+ CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403,
194
+ CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404,
195
+ CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405,
196
+
197
+ CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500,
198
+ CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501,
199
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502,
200
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503,
201
+
202
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600,
203
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601,
204
+
205
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700,
206
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701,
207
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702,
208
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703,
209
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704,
210
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705,
211
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706,
212
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707,
213
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708,
214
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709,
215
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710,
216
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711,
217
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712,
218
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713,
219
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714,
220
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715,
221
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716,
222
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717,
223
+
224
+ CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750,
225
+ CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751,
226
+ CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752,
227
+ CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753,
228
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754,
229
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755,
230
+ CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756,
231
+ CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757,
232
+ CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758,
233
+
234
+ CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770,
235
+ CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771,
236
+ CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772,
237
+ CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773,
238
+ CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774,
239
+
240
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780,
241
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781,
242
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782,
243
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783,
244
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784,
245
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785,
246
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786,
247
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787,
248
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788,
249
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789,
250
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790,
251
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791,
252
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792,
253
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793,
254
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794,
255
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795,
256
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796,
257
+
258
+ CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800,
259
+ CUDNN_ATTR_OPERATIONGRAPH_OPS = 801,
260
+ CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802,
261
+
262
+ CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900,
263
+ CUDNN_ATTR_TENSOR_DATA_TYPE = 901,
264
+ CUDNN_ATTR_TENSOR_DIMENSIONS = 902,
265
+ CUDNN_ATTR_TENSOR_STRIDES = 903,
266
+ CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904,
267
+ CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905,
268
+ CUDNN_ATTR_TENSOR_UNIQUE_ID = 906,
269
+ CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907,
270
+ CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908,
271
+ CUDNN_ATTR_TENSOR_REORDERING_MODE = 909,
272
+ CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913,
273
+
274
+ CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000,
275
+ CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001,
276
+ CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002,
277
+ CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003,
278
+
279
+ CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100,
280
+ CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101,
281
+
282
+ CUDNN_ATTR_KNOB_INFO_TYPE = 1200,
283
+ CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201,
284
+ CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202,
285
+ CUDNN_ATTR_KNOB_INFO_STRIDE = 1203,
286
+
287
+ CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300,
288
+ CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301,
289
+ CUDNN_ATTR_ENGINE_KNOB_INFO = 1302,
290
+ CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303,
291
+ CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304,
292
+ CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305,
293
+
294
+ CUDNN_ATTR_MATMUL_COMP_TYPE = 1500,
295
+ CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503,
296
+
297
+ CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520,
298
+ CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521,
299
+ CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522,
300
+ CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523,
301
+ CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524,
302
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525,
303
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526,
304
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527,
305
+
306
+ CUDNN_ATTR_REDUCTION_OPERATOR = 1600,
307
+ CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601,
308
+
309
+ CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610,
310
+ CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611,
311
+ CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612,
312
+
313
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620,
314
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621,
315
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622,
316
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623,
317
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624,
318
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625,
319
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626,
320
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627,
321
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628,
322
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629,
323
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630,
324
+
325
+ CUDNN_ATTR_RESAMPLE_MODE = 1700,
326
+ CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701,
327
+ CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702,
328
+ CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703,
329
+ CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704,
330
+ CUDNN_ATTR_RESAMPLE_STRIDES = 1705,
331
+ CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706,
332
+ CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707,
333
+ CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708,
334
+
335
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710,
336
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711,
337
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712,
338
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713,
339
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714,
340
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716,
341
+
342
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720,
343
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721,
344
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722,
345
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723,
346
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724,
347
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725,
348
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726,
349
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727,
350
+
351
+ CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800,
352
+ CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801,
353
+ CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802,
354
+ CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803,
355
+
356
+ CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900,
357
+ CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901,
358
+ CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902,
359
+ CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903,
360
+ CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904,
361
+
362
+ CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000,
363
+ CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001,
364
+ CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002,
365
+ CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003,
366
+ CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004,
367
+ CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005,
368
+ CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006,
369
+ CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007,
370
+ CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008,
371
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009,
372
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010,
373
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011,
374
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012,
375
+ CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013,
376
+ CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014,
377
+
378
+ CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100,
379
+ CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101,
380
+ CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102,
381
+ CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103,
382
+ CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104,
383
+ CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105,
384
+ CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106,
385
+ CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107,
386
+ CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108,
387
+ CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109,
388
+ CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110,
389
+
390
+ CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200,
391
+ CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201,
392
+
393
+ CUDNN_ATTR_RNG_DISTRIBUTION = 2300,
394
+ CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301,
395
+ CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302,
396
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303,
397
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304,
398
+ CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305,
399
+
400
+ CUDNN_ATTR_OPERATION_RNG_YDESC = 2310,
401
+ CUDNN_ATTR_OPERATION_RNG_SEED = 2311,
402
+ CUDNN_ATTR_OPERATION_RNG_DESC = 2312,
403
+ CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313,
404
+
405
+ } cudnnBackendAttributeName_t;
406
+
407
+ typedef enum {
408
+ CUDNN_TYPE_HANDLE = 0,
409
+ CUDNN_TYPE_DATA_TYPE,
410
+ CUDNN_TYPE_BOOLEAN,
411
+ CUDNN_TYPE_INT64,
412
+ CUDNN_TYPE_FLOAT,
413
+ CUDNN_TYPE_DOUBLE,
414
+ CUDNN_TYPE_VOID_PTR,
415
+ CUDNN_TYPE_CONVOLUTION_MODE,
416
+ CUDNN_TYPE_HEUR_MODE,
417
+ CUDNN_TYPE_KNOB_TYPE,
418
+ CUDNN_TYPE_NAN_PROPOGATION,
419
+ CUDNN_TYPE_NUMERICAL_NOTE,
420
+ CUDNN_TYPE_LAYOUT_TYPE,
421
+ CUDNN_TYPE_ATTRIB_NAME,
422
+ CUDNN_TYPE_POINTWISE_MODE,
423
+ CUDNN_TYPE_BACKEND_DESCRIPTOR,
424
+ CUDNN_TYPE_GENSTATS_MODE,
425
+ CUDNN_TYPE_BN_FINALIZE_STATS_MODE,
426
+ CUDNN_TYPE_REDUCTION_OPERATOR_TYPE,
427
+ CUDNN_TYPE_BEHAVIOR_NOTE,
428
+ CUDNN_TYPE_TENSOR_REORDERING_MODE,
429
+ CUDNN_TYPE_RESAMPLE_MODE,
430
+ CUDNN_TYPE_PADDING_MODE,
431
+ CUDNN_TYPE_INT32,
432
+ CUDNN_TYPE_CHAR,
433
+ CUDNN_TYPE_SIGNAL_MODE,
434
+ CUDNN_TYPE_FRACTION,
435
+ CUDNN_TYPE_NORM_MODE,
436
+ CUDNN_TYPE_NORM_FWD_PHASE,
437
+ CUDNN_TYPE_RNG_DISTRIBUTION
438
+ } cudnnBackendAttributeType_t;
439
+
440
+ typedef enum {
441
+ CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0,
442
+ CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR,
443
+ CUDNN_BACKEND_ENGINE_DESCRIPTOR,
444
+ CUDNN_BACKEND_ENGINECFG_DESCRIPTOR,
445
+ CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR,
446
+ CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR,
447
+ CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR,
448
+ CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR,
449
+ CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR,
450
+ CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR,
451
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR,
452
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR,
453
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR,
454
+ CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR,
455
+ CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR,
456
+ CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR,
457
+ CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR,
458
+ CUDNN_BACKEND_TENSOR_DESCRIPTOR,
459
+ CUDNN_BACKEND_MATMUL_DESCRIPTOR,
460
+ CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR,
461
+ CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR,
462
+ CUDNN_BACKEND_REDUCTION_DESCRIPTOR,
463
+ CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR,
464
+ CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR,
465
+ CUDNN_BACKEND_RESAMPLE_DESCRIPTOR,
466
+ CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR,
467
+ CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR,
468
+ CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR,
469
+ CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR,
470
+ CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR,
471
+ CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR,
472
+ CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR,
473
+ CUDNN_BACKEND_RNG_DESCRIPTOR,
474
+ CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR
475
+ } cudnnBackendDescriptorType_t;
476
+
477
+ typedef enum {
478
+ CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0,
479
+ CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS,
480
+ CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION,
481
+ CUDNN_NUMERICAL_NOTE_FFT,
482
+ CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC,
483
+ CUDNN_NUMERICAL_NOTE_WINOGRAD,
484
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4,
485
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6,
486
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13,
487
+ CUDNN_NUMERICAL_NOTE_TYPE_COUNT,
488
+ } cudnnBackendNumericalNote_t;
489
+
490
+ typedef enum {
491
+ CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0,
492
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1,
493
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2,
494
+ CUDNN_BEHAVIOR_NOTE_TYPE_COUNT,
495
+ } cudnnBackendBehaviorNote_t;
496
+
497
+ typedef enum {
498
+ CUDNN_KNOB_TYPE_SPLIT_K = 0,
499
+ CUDNN_KNOB_TYPE_SWIZZLE = 1,
500
+ CUDNN_KNOB_TYPE_TILE_SIZE = 2,
501
+ CUDNN_KNOB_TYPE_USE_TEX = 3,
502
+ CUDNN_KNOB_TYPE_EDGE = 4,
503
+ CUDNN_KNOB_TYPE_KBLOCK = 5,
504
+ CUDNN_KNOB_TYPE_LDGA = 6,
505
+ CUDNN_KNOB_TYPE_LDGB = 7,
506
+ CUDNN_KNOB_TYPE_CHUNK_K = 8,
507
+ CUDNN_KNOB_TYPE_SPLIT_H = 9,
508
+ CUDNN_KNOB_TYPE_WINO_TILE = 10,
509
+ CUDNN_KNOB_TYPE_MULTIPLY = 11,
510
+ CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12,
511
+ CUDNN_KNOB_TYPE_TILEK = 13,
512
+ CUDNN_KNOB_TYPE_STAGES = 14,
513
+ CUDNN_KNOB_TYPE_REDUCTION_MODE = 15,
514
+ CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16,
515
+ CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17,
516
+ CUDNN_KNOB_TYPE_IDX_MODE = 18,
517
+ CUDNN_KNOB_TYPE_SLICED = 19,
518
+ CUDNN_KNOB_TYPE_SPLIT_RS = 20,
519
+ CUDNN_KNOB_TYPE_SINGLEBUFFER = 21,
520
+ CUDNN_KNOB_TYPE_LDGC = 22,
521
+ CUDNN_KNOB_TYPE_SPECFILT = 23,
522
+ CUDNN_KNOB_TYPE_KERNEL_CFG = 24,
523
+ CUDNN_KNOB_TYPE_WORKSPACE = 25,
524
+ CUDNN_KNOB_TYPE_TILE_CGA = 26,
525
+ CUDNN_KNOB_TYPE_TILE_CGA_M = 27,
526
+ CUDNN_KNOB_TYPE_TILE_CGA_N = 28,
527
+ CUDNN_KNOB_TYPE_BLOCK_SIZE = 29,
528
+ CUDNN_KNOB_TYPE_OCCUPANCY = 30,
529
+ CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31,
530
+ CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32,
531
+ CUDNN_KNOB_TYPE_COUNTS,
532
+ } cudnnBackendKnobType_t;
533
+
534
+ typedef enum {
535
+ CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0,
536
+ CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1,
537
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2,
538
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3,
539
+ CUDNN_LAYOUT_TYPE_COUNT = 4,
540
+ } cudnnBackendLayoutType_t;
541
+
542
+ typedef enum {
543
+ CUDNN_HEUR_MODE_INSTANT = 0,
544
+ CUDNN_HEUR_MODE_B = 1,
545
+ CUDNN_HEUR_MODE_FALLBACK = 2,
546
+ CUDNN_HEUR_MODE_A = 3,
547
+ CUDNN_HEUR_MODES_COUNT = 4,
548
+ } cudnnBackendHeurMode_t;
549
+
550
+ typedef enum {
551
+ CUDNN_TENSOR_REORDERING_NONE = 0,
552
+ CUDNN_TENSOR_REORDERING_INT8x32 = 1,
553
+ CUDNN_TENSOR_REORDERING_F16x16 = 2,
554
+ } cudnnBackendTensorReordering_t;
555
+
556
+ typedef enum {
557
+ CUDNN_ZERO_PAD = 0,
558
+ CUDNN_NEG_INF_PAD = 1,
559
+ CUDNN_EDGE_VAL_PAD = 2,
560
+ } cudnnPaddingMode_t;
561
+
562
+ typedef enum {
563
+ CUDNN_LAYER_NORM = 0,
564
+ CUDNN_INSTANCE_NORM = 1,
565
+ CUDNN_BATCH_NORM = 2,
566
+ CUDNN_GROUP_NORM = 3,
567
+ } cudnnBackendNormMode_t;
568
+
569
+ typedef enum {
570
+ CUDNN_NORM_FWD_INFERENCE = 0,
571
+ CUDNN_NORM_FWD_TRAINING = 1,
572
+ } cudnnBackendNormFwdPhase_t;
573
+
574
+ cudnnStatus_t CUDNNWINAPI
575
+ cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor);
576
+
577
+ cudnnStatus_t CUDNNWINAPI
578
+ cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor);
579
+
580
+ cudnnStatus_t CUDNNWINAPI
581
+ cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor);
582
+
583
+ cudnnStatus_t CUDNNWINAPI
584
+ cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor);
585
+
586
+ cudnnStatus_t CUDNNWINAPI
587
+ cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor,
588
+ cudnnBackendAttributeName_t attributeName,
589
+ cudnnBackendAttributeType_t attributeType,
590
+ int64_t elementCount,
591
+ const void *arrayOfElements);
592
+
593
+ cudnnStatus_t CUDNNWINAPI
594
+ cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor,
595
+ cudnnBackendAttributeName_t attributeName,
596
+ cudnnBackendAttributeType_t attributeType,
597
+ int64_t requestedElementCount,
598
+ int64_t *elementCount,
599
+ void *arrayOfElements);
600
+
601
+ cudnnStatus_t CUDNNWINAPI
602
+ cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack);
603
+
604
+ #if defined(__cplusplus)
605
+ }
606
+ #endif
607
+
608
+ #endif /* _CUDNN_BACKEND_H_ */
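The backend entry points at the end of this header form a small generic object system: create a typed descriptor, set its attributes, then finalize it before it can be used in an operation graph or execution plan. A hedged C sketch of describing a single tensor this way follows; the shape, strides, UID, and alignment are arbitrary example values, not anything mandated by the header.

#include <cudnn.h>

/* Hedged sketch: a fully packed 4-D NCHW float tensor described through the
 * backend (graph) API. */
static cudnnStatus_t build_backend_tensor(cudnnBackendDescriptor_t *out)
{
    cudnnBackendDescriptor_t tDesc;
    cudnnStatus_t st = cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, &tDesc);
    if (st != CUDNN_STATUS_SUCCESS) return st;

    cudnnDataType_t dtype = CUDNN_DATA_FLOAT;
    int64_t dims[4]      = {1, 8, 32, 32};                /* N, C, H, W           */
    int64_t strides[4]   = {8 * 32 * 32, 32 * 32, 32, 1}; /* fully packed NCHW    */
    int64_t uid          = 101;                           /* arbitrary unique id  */
    int64_t alignment    = 4;                             /* bytes, >= sizeof(float) */

    cudnnBackendSetAttribute(tDesc, CUDNN_ATTR_TENSOR_DATA_TYPE,
                             CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(tDesc, CUDNN_ATTR_TENSOR_DIMENSIONS,
                             CUDNN_TYPE_INT64, 4, dims);
    cudnnBackendSetAttribute(tDesc, CUDNN_ATTR_TENSOR_STRIDES,
                             CUDNN_TYPE_INT64, 4, strides);
    cudnnBackendSetAttribute(tDesc, CUDNN_ATTR_TENSOR_UNIQUE_ID,
                             CUDNN_TYPE_INT64, 1, &uid);
    cudnnBackendSetAttribute(tDesc, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT,
                             CUDNN_TYPE_INT64, 1, &alignment);

    /* Finalize validates the attribute set and makes the descriptor usable;
     * the caller owns (and must eventually destroy) the descriptor. */
    st = cudnnBackendFinalize(tDesc);
    if (st != CUDNN_STATUS_SUCCESS) {
        cudnnBackendDestroyDescriptor(tDesc);
        return st;
    }
    *out = tDesc;
    return CUDNN_STATUS_SUCCESS;
}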
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h ADDED
@@ -0,0 +1,608 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #ifndef _CUDNN_BACKEND_H_
51
+ #define _CUDNN_BACKEND_H_
52
+
53
+ /*
54
+ * The content in this header file is under development to be included in cudnn.h in the future
55
+ * Production code should have all include of this header file remove.
56
+ */
57
+
58
+ #include "cudnn_ops_infer.h"
59
+ #include "cudnn_cnn_infer.h"
60
+
61
+ /* NOTE: definition in extern "C" to be copied later to public header */
62
+ #if defined(__cplusplus)
63
+ extern "C" {
64
+ #endif
65
+
66
+ typedef void *cudnnBackendDescriptor_t;
67
+
68
+ typedef struct cudnnFractionStruct {
69
+ int64_t numerator;
70
+ int64_t denominator;
71
+ } cudnnFraction_t;
72
+
73
+ typedef enum {
74
+ CUDNN_POINTWISE_ADD = 0,
75
+ CUDNN_POINTWISE_ADD_SQUARE = 5,
76
+ CUDNN_POINTWISE_DIV = 6,
77
+ CUDNN_POINTWISE_MAX = 3,
78
+ CUDNN_POINTWISE_MIN = 2,
79
+ CUDNN_POINTWISE_MOD = 7,
80
+ CUDNN_POINTWISE_MUL = 1,
81
+ CUDNN_POINTWISE_POW = 8,
82
+ CUDNN_POINTWISE_SUB = 9,
83
+
84
+ CUDNN_POINTWISE_ABS = 10,
85
+ CUDNN_POINTWISE_CEIL = 11,
86
+ CUDNN_POINTWISE_COS = 12,
87
+ CUDNN_POINTWISE_EXP = 13,
88
+ CUDNN_POINTWISE_FLOOR = 14,
89
+ CUDNN_POINTWISE_LOG = 15,
90
+ CUDNN_POINTWISE_NEG = 16,
91
+ CUDNN_POINTWISE_RSQRT = 17,
92
+ CUDNN_POINTWISE_SIN = 18,
93
+ CUDNN_POINTWISE_SQRT = 4,
94
+ CUDNN_POINTWISE_TAN = 19,
95
+ CUDNN_POINTWISE_ERF = 20,
96
+ CUDNN_POINTWISE_IDENTITY = 21,
97
+ CUDNN_POINTWISE_RECIPROCAL = 22,
98
+
99
+ CUDNN_POINTWISE_RELU_FWD = 100,
100
+ CUDNN_POINTWISE_TANH_FWD = 101,
101
+ CUDNN_POINTWISE_SIGMOID_FWD = 102,
102
+ CUDNN_POINTWISE_ELU_FWD = 103,
103
+ CUDNN_POINTWISE_GELU_FWD = 104,
104
+ CUDNN_POINTWISE_SOFTPLUS_FWD = 105,
105
+ CUDNN_POINTWISE_SWISH_FWD = 106,
106
+ CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107,
107
+
108
+ CUDNN_POINTWISE_RELU_BWD = 200,
109
+ CUDNN_POINTWISE_TANH_BWD = 201,
110
+ CUDNN_POINTWISE_SIGMOID_BWD = 202,
111
+ CUDNN_POINTWISE_ELU_BWD = 203,
112
+ CUDNN_POINTWISE_GELU_BWD = 204,
113
+ CUDNN_POINTWISE_SOFTPLUS_BWD = 205,
114
+ CUDNN_POINTWISE_SWISH_BWD = 206,
115
+ CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207,
116
+
117
+ CUDNN_POINTWISE_CMP_EQ = 300,
118
+ CUDNN_POINTWISE_CMP_NEQ = 301,
119
+ CUDNN_POINTWISE_CMP_GT = 302,
120
+ CUDNN_POINTWISE_CMP_GE = 303,
121
+ CUDNN_POINTWISE_CMP_LT = 304,
122
+ CUDNN_POINTWISE_CMP_LE = 305,
123
+
124
+ CUDNN_POINTWISE_LOGICAL_AND = 400,
125
+ CUDNN_POINTWISE_LOGICAL_OR = 401,
126
+ CUDNN_POINTWISE_LOGICAL_NOT = 402,
127
+
128
+ CUDNN_POINTWISE_GEN_INDEX = 501,
129
+
130
+ CUDNN_POINTWISE_BINARY_SELECT = 601,
131
+ } cudnnPointwiseMode_t;
132
+
133
+ typedef enum {
134
+ CUDNN_RESAMPLE_NEAREST = 0,
135
+ CUDNN_RESAMPLE_BILINEAR = 1,
136
+ CUDNN_RESAMPLE_AVGPOOL = 2,
137
+ CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2,
138
+ CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4,
139
+ CUDNN_RESAMPLE_MAXPOOL = 3,
140
+ } cudnnResampleMode_t;
141
+
142
+ typedef enum {
143
+ CUDNN_SIGNAL_SET = 0,
144
+ CUDNN_SIGNAL_WAIT = 1,
145
+ } cudnnSignalMode_t;
146
+
147
+ typedef enum {
148
+ CUDNN_GENSTATS_SUM_SQSUM = 0,
149
+ } cudnnGenStatsMode_t;
150
+
151
+ typedef enum {
152
+ CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0,
153
+ CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1,
154
+ } cudnnBnFinalizeStatsMode_t;
155
+
156
+ typedef enum {
157
+ CUDNN_RNG_DISTRIBUTION_BERNOULLI,
158
+ CUDNN_RNG_DISTRIBUTION_UNIFORM,
159
+ CUDNN_RNG_DISTRIBUTION_NORMAL,
160
+ } cudnnRngDistribution_t;
161
+
162
+ typedef enum {
163
+ CUDNN_ATTR_POINTWISE_MODE = 0,
164
+ CUDNN_ATTR_POINTWISE_MATH_PREC = 1,
165
+ CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2,
166
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3,
167
+ CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4,
168
+ CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5,
169
+ CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6,
170
+ CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7,
171
+ CUDNN_ATTR_POINTWISE_SWISH_BETA = 8,
172
+ CUDNN_ATTR_POINTWISE_AXIS = 9,
173
+
174
+ CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100,
175
+ CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101,
176
+ CUDNN_ATTR_CONVOLUTION_DILATIONS = 102,
177
+ CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103,
178
+ CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104,
179
+ CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105,
180
+ CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106,
181
+
182
+ CUDNN_ATTR_ENGINEHEUR_MODE = 200,
183
+ CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201,
184
+ CUDNN_ATTR_ENGINEHEUR_RESULTS = 202,
185
+
186
+ CUDNN_ATTR_ENGINECFG_ENGINE = 300,
187
+ CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301,
188
+ CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302,
189
+
190
+ CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400,
191
+ CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401,
192
+ CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402,
193
+ CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403,
194
+ CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404,
195
+ CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405,
196
+
197
+ CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500,
198
+ CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501,
199
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502,
200
+ CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503,
201
+
202
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600,
203
+ CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601,
204
+
205
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700,
206
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701,
207
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702,
208
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703,
209
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704,
210
+ CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705,
211
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706,
212
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707,
213
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708,
214
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709,
215
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710,
216
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711,
217
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712,
218
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713,
219
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714,
220
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715,
221
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716,
222
+ CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717,
223
+
224
+ CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750,
225
+ CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751,
226
+ CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752,
227
+ CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753,
228
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754,
229
+ CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755,
230
+ CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756,
231
+ CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757,
232
+ CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758,
233
+
234
+ CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770,
235
+ CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771,
236
+ CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772,
237
+ CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773,
238
+ CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774,
239
+
240
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780,
241
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781,
242
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782,
243
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783,
244
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784,
245
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785,
246
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786,
247
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787,
248
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788,
249
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789,
250
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790,
251
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791,
252
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792,
253
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793,
254
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794,
255
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795,
256
+ CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796,
257
+
258
+ CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800,
259
+ CUDNN_ATTR_OPERATIONGRAPH_OPS = 801,
260
+ CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802,
261
+
262
+ CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900,
263
+ CUDNN_ATTR_TENSOR_DATA_TYPE = 901,
264
+ CUDNN_ATTR_TENSOR_DIMENSIONS = 902,
265
+ CUDNN_ATTR_TENSOR_STRIDES = 903,
266
+ CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904,
267
+ CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905,
268
+ CUDNN_ATTR_TENSOR_UNIQUE_ID = 906,
269
+ CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907,
270
+ CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908,
271
+ CUDNN_ATTR_TENSOR_REORDERING_MODE = 909,
272
+ CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913,
273
+
274
+ CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000,
275
+ CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001,
276
+ CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002,
277
+ CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003,
278
+
279
+ CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100,
280
+ CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101,
281
+
282
+ CUDNN_ATTR_KNOB_INFO_TYPE = 1200,
283
+ CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201,
284
+ CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202,
285
+ CUDNN_ATTR_KNOB_INFO_STRIDE = 1203,
286
+
287
+ CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300,
288
+ CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301,
289
+ CUDNN_ATTR_ENGINE_KNOB_INFO = 1302,
290
+ CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303,
291
+ CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304,
292
+ CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305,
293
+
294
+ CUDNN_ATTR_MATMUL_COMP_TYPE = 1500,
295
+ CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503,
296
+
297
+ CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520,
298
+ CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521,
299
+ CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522,
300
+ CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523,
301
+ CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524,
302
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525,
303
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526,
304
+ CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527,
305
+
306
+ CUDNN_ATTR_REDUCTION_OPERATOR = 1600,
307
+ CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601,
308
+
309
+ CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610,
310
+ CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611,
311
+ CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612,
312
+
313
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620,
314
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621,
315
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622,
316
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623,
317
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624,
318
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625,
319
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626,
320
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627,
321
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628,
322
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629,
323
+ CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630,
324
+
325
+ CUDNN_ATTR_RESAMPLE_MODE = 1700,
326
+ CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701,
327
+ CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702,
328
+ CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703,
329
+ CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704,
330
+ CUDNN_ATTR_RESAMPLE_STRIDES = 1705,
331
+ CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706,
332
+ CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707,
333
+ CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708,
334
+
335
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710,
336
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711,
337
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712,
338
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713,
339
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714,
340
+ CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716,
341
+
342
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720,
343
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721,
344
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722,
345
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723,
346
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724,
347
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725,
348
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726,
349
+ CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727,
350
+
351
+ CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800,
352
+ CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801,
353
+ CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802,
354
+ CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803,
355
+
356
+ CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900,
357
+ CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901,
358
+ CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902,
359
+ CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903,
360
+ CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904,
361
+
362
+ CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000,
363
+ CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001,
364
+ CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002,
365
+ CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003,
366
+ CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004,
367
+ CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005,
368
+ CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006,
369
+ CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007,
370
+ CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008,
371
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009,
372
+ CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010,
373
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011,
374
+ CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012,
375
+ CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013,
376
+ CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014,
377
+
378
+ CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100,
379
+ CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101,
380
+ CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102,
381
+ CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103,
382
+ CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104,
383
+ CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105,
384
+ CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106,
385
+ CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107,
386
+ CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108,
387
+ CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109,
388
+ CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110,
389
+
390
+ CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200,
391
+ CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201,
392
+
393
+ CUDNN_ATTR_RNG_DISTRIBUTION = 2300,
394
+ CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301,
395
+ CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302,
396
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303,
397
+ CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304,
398
+ CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305,
399
+
400
+ CUDNN_ATTR_OPERATION_RNG_YDESC = 2310,
401
+ CUDNN_ATTR_OPERATION_RNG_SEED = 2311,
402
+ CUDNN_ATTR_OPERATION_RNG_DESC = 2312,
403
+ CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313,
404
+
405
+ } cudnnBackendAttributeName_t;
406
+
407
+ typedef enum {
408
+ CUDNN_TYPE_HANDLE = 0,
409
+ CUDNN_TYPE_DATA_TYPE,
410
+ CUDNN_TYPE_BOOLEAN,
411
+ CUDNN_TYPE_INT64,
412
+ CUDNN_TYPE_FLOAT,
413
+ CUDNN_TYPE_DOUBLE,
414
+ CUDNN_TYPE_VOID_PTR,
415
+ CUDNN_TYPE_CONVOLUTION_MODE,
416
+ CUDNN_TYPE_HEUR_MODE,
417
+ CUDNN_TYPE_KNOB_TYPE,
418
+ CUDNN_TYPE_NAN_PROPOGATION,
419
+ CUDNN_TYPE_NUMERICAL_NOTE,
420
+ CUDNN_TYPE_LAYOUT_TYPE,
421
+ CUDNN_TYPE_ATTRIB_NAME,
422
+ CUDNN_TYPE_POINTWISE_MODE,
423
+ CUDNN_TYPE_BACKEND_DESCRIPTOR,
424
+ CUDNN_TYPE_GENSTATS_MODE,
425
+ CUDNN_TYPE_BN_FINALIZE_STATS_MODE,
426
+ CUDNN_TYPE_REDUCTION_OPERATOR_TYPE,
427
+ CUDNN_TYPE_BEHAVIOR_NOTE,
428
+ CUDNN_TYPE_TENSOR_REORDERING_MODE,
429
+ CUDNN_TYPE_RESAMPLE_MODE,
430
+ CUDNN_TYPE_PADDING_MODE,
431
+ CUDNN_TYPE_INT32,
432
+ CUDNN_TYPE_CHAR,
433
+ CUDNN_TYPE_SIGNAL_MODE,
434
+ CUDNN_TYPE_FRACTION,
435
+ CUDNN_TYPE_NORM_MODE,
436
+ CUDNN_TYPE_NORM_FWD_PHASE,
437
+ CUDNN_TYPE_RNG_DISTRIBUTION
438
+ } cudnnBackendAttributeType_t;
439
+
440
+ typedef enum {
441
+ CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0,
442
+ CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR,
443
+ CUDNN_BACKEND_ENGINE_DESCRIPTOR,
444
+ CUDNN_BACKEND_ENGINECFG_DESCRIPTOR,
445
+ CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR,
446
+ CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR,
447
+ CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR,
448
+ CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR,
449
+ CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR,
450
+ CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR,
451
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR,
452
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR,
453
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR,
454
+ CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR,
455
+ CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR,
456
+ CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR,
457
+ CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR,
458
+ CUDNN_BACKEND_TENSOR_DESCRIPTOR,
459
+ CUDNN_BACKEND_MATMUL_DESCRIPTOR,
460
+ CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR,
461
+ CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR,
462
+ CUDNN_BACKEND_REDUCTION_DESCRIPTOR,
463
+ CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR,
464
+ CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR,
465
+ CUDNN_BACKEND_RESAMPLE_DESCRIPTOR,
466
+ CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR,
467
+ CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR,
468
+ CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR,
469
+ CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR,
470
+ CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR,
471
+ CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR,
472
+ CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR,
473
+ CUDNN_BACKEND_RNG_DESCRIPTOR,
474
+ CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR
475
+ } cudnnBackendDescriptorType_t;
476
+
477
+ typedef enum {
478
+ CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0,
479
+ CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS,
480
+ CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION,
481
+ CUDNN_NUMERICAL_NOTE_FFT,
482
+ CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC,
483
+ CUDNN_NUMERICAL_NOTE_WINOGRAD,
484
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4,
485
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6,
486
+ CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13,
487
+ CUDNN_NUMERICAL_NOTE_TYPE_COUNT,
488
+ } cudnnBackendNumericalNote_t;
489
+
490
+ typedef enum {
491
+ CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0,
492
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1,
493
+ CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2,
494
+ CUDNN_BEHAVIOR_NOTE_TYPE_COUNT,
495
+ } cudnnBackendBehaviorNote_t;
496
+
497
+ typedef enum {
498
+ CUDNN_KNOB_TYPE_SPLIT_K = 0,
499
+ CUDNN_KNOB_TYPE_SWIZZLE = 1,
500
+ CUDNN_KNOB_TYPE_TILE_SIZE = 2,
501
+ CUDNN_KNOB_TYPE_USE_TEX = 3,
502
+ CUDNN_KNOB_TYPE_EDGE = 4,
503
+ CUDNN_KNOB_TYPE_KBLOCK = 5,
504
+ CUDNN_KNOB_TYPE_LDGA = 6,
505
+ CUDNN_KNOB_TYPE_LDGB = 7,
506
+ CUDNN_KNOB_TYPE_CHUNK_K = 8,
507
+ CUDNN_KNOB_TYPE_SPLIT_H = 9,
508
+ CUDNN_KNOB_TYPE_WINO_TILE = 10,
509
+ CUDNN_KNOB_TYPE_MULTIPLY = 11,
510
+ CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12,
511
+ CUDNN_KNOB_TYPE_TILEK = 13,
512
+ CUDNN_KNOB_TYPE_STAGES = 14,
513
+ CUDNN_KNOB_TYPE_REDUCTION_MODE = 15,
514
+ CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16,
515
+ CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17,
516
+ CUDNN_KNOB_TYPE_IDX_MODE = 18,
517
+ CUDNN_KNOB_TYPE_SLICED = 19,
518
+ CUDNN_KNOB_TYPE_SPLIT_RS = 20,
519
+ CUDNN_KNOB_TYPE_SINGLEBUFFER = 21,
520
+ CUDNN_KNOB_TYPE_LDGC = 22,
521
+ CUDNN_KNOB_TYPE_SPECFILT = 23,
522
+ CUDNN_KNOB_TYPE_KERNEL_CFG = 24,
523
+ CUDNN_KNOB_TYPE_WORKSPACE = 25,
524
+ CUDNN_KNOB_TYPE_TILE_CGA = 26,
525
+ CUDNN_KNOB_TYPE_TILE_CGA_M = 27,
526
+ CUDNN_KNOB_TYPE_TILE_CGA_N = 28,
527
+ CUDNN_KNOB_TYPE_BLOCK_SIZE = 29,
528
+ CUDNN_KNOB_TYPE_OCCUPANCY = 30,
529
+ CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31,
530
+ CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32,
531
+ CUDNN_KNOB_TYPE_COUNTS,
532
+ } cudnnBackendKnobType_t;
533
+
534
+ typedef enum {
535
+ CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0,
536
+ CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1,
537
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2,
538
+ CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3,
539
+ CUDNN_LAYOUT_TYPE_COUNT = 4,
540
+ } cudnnBackendLayoutType_t;
541
+
542
+ typedef enum {
543
+ CUDNN_HEUR_MODE_INSTANT = 0,
544
+ CUDNN_HEUR_MODE_B = 1,
545
+ CUDNN_HEUR_MODE_FALLBACK = 2,
546
+ CUDNN_HEUR_MODE_A = 3,
547
+ CUDNN_HEUR_MODES_COUNT = 4,
548
+ } cudnnBackendHeurMode_t;
549
+
550
+ typedef enum {
551
+ CUDNN_TENSOR_REORDERING_NONE = 0,
552
+ CUDNN_TENSOR_REORDERING_INT8x32 = 1,
553
+ CUDNN_TENSOR_REORDERING_F16x16 = 2,
554
+ } cudnnBackendTensorReordering_t;
555
+
556
+ typedef enum {
557
+ CUDNN_ZERO_PAD = 0,
558
+ CUDNN_NEG_INF_PAD = 1,
559
+ CUDNN_EDGE_VAL_PAD = 2,
560
+ } cudnnPaddingMode_t;
561
+
562
+ typedef enum {
563
+ CUDNN_LAYER_NORM = 0,
564
+ CUDNN_INSTANCE_NORM = 1,
565
+ CUDNN_BATCH_NORM = 2,
566
+ CUDNN_GROUP_NORM = 3,
567
+ } cudnnBackendNormMode_t;
568
+
569
+ typedef enum {
570
+ CUDNN_NORM_FWD_INFERENCE = 0,
571
+ CUDNN_NORM_FWD_TRAINING = 1,
572
+ } cudnnBackendNormFwdPhase_t;
573
+
574
+ cudnnStatus_t CUDNNWINAPI
575
+ cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor);
576
+
577
+ cudnnStatus_t CUDNNWINAPI
578
+ cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor);
579
+
580
+ cudnnStatus_t CUDNNWINAPI
581
+ cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor);
582
+
583
+ cudnnStatus_t CUDNNWINAPI
584
+ cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor);
585
+
586
+ cudnnStatus_t CUDNNWINAPI
587
+ cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor,
588
+ cudnnBackendAttributeName_t attributeName,
589
+ cudnnBackendAttributeType_t attributeType,
590
+ int64_t elementCount,
591
+ const void *arrayOfElements);
592
+
593
+ cudnnStatus_t CUDNNWINAPI
594
+ cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor,
595
+ cudnnBackendAttributeName_t attributeName,
596
+ cudnnBackendAttributeType_t attributeType,
597
+ int64_t requestedElementCount,
598
+ int64_t *elementCount,
599
+ void *arrayOfElements);
600
+
601
+ cudnnStatus_t CUDNNWINAPI
602
+ cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack);
603
+
604
+ #if defined(__cplusplus)
605
+ }
606
+ #endif
607
+
608
+ #endif /* _CUDNN_BACKEND_H_ */
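
cudnn_backend_v8.h carries the same declarations under the versioned file name. Rather than repeat the tensor example above, here is a complementary sketch of the execution side: given an already-finalized execution plan (assumed to exist; building one requires an operation graph and engine configuration not shown here), a variant pack binds device pointers to tensor UIDs and the plan's workspace requirement is queried before cudnnBackendExecute. The helper name run_plan and its arguments are illustrative, not part of the header.

```c
/* Illustrative sketch: execute a previously finalized execution plan.
 * 'plan' is assumed to be a finalized CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR;
 * uids[i] must match the CUDNN_ATTR_TENSOR_UNIQUE_ID of the i-th tensor and
 * ptrs[i] its device buffer. */
#include <cudnn.h>

static cudnnStatus_t run_plan(cudnnHandle_t handle,
                              cudnnBackendDescriptor_t plan,
                              int64_t *uids, void **ptrs, int64_t n,
                              void *workspace) {
    cudnnStatus_t status;
    cudnnBackendDescriptor_t varpack = NULL;

    /* Query how much workspace the plan needs; the caller is assumed to
     * have allocated at least this much into 'workspace'. */
    int64_t ws_size = 0, count = 0;
    status = cudnnBackendGetAttribute(plan, CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE,
                                      CUDNN_TYPE_INT64, 1, &count, &ws_size);
    if (status != CUDNN_STATUS_SUCCESS) return status;

    /* Bind data pointers to tensor UIDs in a variant pack. */
    status = cudnnBackendCreateDescriptor(CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, &varpack);
    if (status != CUDNN_STATUS_SUCCESS) return status;
    cudnnBackendSetAttribute(varpack, CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS,
                             CUDNN_TYPE_INT64, n, uids);
    cudnnBackendSetAttribute(varpack, CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS,
                             CUDNN_TYPE_VOID_PTR, n, ptrs);
    cudnnBackendSetAttribute(varpack, CUDNN_ATTR_VARIANT_PACK_WORKSPACE,
                             CUDNN_TYPE_VOID_PTR, 1, &workspace);
    status = cudnnBackendFinalize(varpack);
    if (status == CUDNN_STATUS_SUCCESS)
        status = cudnnBackendExecute(handle, plan, varpack);

    cudnnBackendDestroyDescriptor(varpack);
    return status;
}
```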
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h ADDED
@@ -0,0 +1,571 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #if !defined(CUDNN_CNN_INFER_H_)
55
+ #define CUDNN_CNN_INFER_H_
56
+
57
+ #pragma once
58
+ #include <cuda_runtime.h>
59
+ #include <stdint.h>
60
+
61
+ #include "cudnn_version.h"
62
+ #include "cudnn_ops_infer.h"
63
+
64
+ /* These version numbers are autogenerated, do not edit manually. */
65
+ #define CUDNN_CNN_INFER_MAJOR 8
66
+ #define CUDNN_CNN_INFER_MINOR 9
67
+ #define CUDNN_CNN_INFER_PATCH 2
68
+
69
+ #if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \
70
+ (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL)
71
+ #error Version mismatch in cuDNN CNN INFER!!!
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ extern "C" {
76
+ #endif
77
+
78
+ typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t;
79
+
80
+ /*
81
+ * convolution mode
82
+ */
83
+ typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t;
84
+
85
+ /*
86
+ * CUDNN Reorder
87
+ */
88
+ typedef enum {
89
+ CUDNN_DEFAULT_REORDER = 0,
90
+ CUDNN_NO_REORDER = 1,
91
+ } cudnnReorderType_t;
92
+
93
+ typedef struct cudnnConvolutionFwdAlgoPerfStruct {
94
+ cudnnConvolutionFwdAlgo_t algo;
95
+ cudnnStatus_t status;
96
+ float time;
97
+ size_t memory;
98
+ cudnnDeterminism_t determinism;
99
+ cudnnMathType_t mathType;
100
+ int reserved[3];
101
+ } cudnnConvolutionFwdAlgoPerf_t;
102
+
103
+ /* Create an instance of convolution descriptor */
104
+ cudnnStatus_t CUDNNWINAPI
105
+ cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc);
106
+
107
+ /* Destroy an instance of convolution descriptor */
108
+ cudnnStatus_t CUDNNWINAPI
109
+ cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc);
110
+
111
+ cudnnStatus_t CUDNNWINAPI
112
+ cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType);
113
+
114
+ cudnnStatus_t CUDNNWINAPI
115
+ cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount);
119
+
120
+ cudnnStatus_t CUDNNWINAPI
121
+ cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount);
122
+
123
+ cudnnStatus_t CUDNNWINAPI
124
+ cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType);
125
+
126
+ cudnnStatus_t CUDNNWINAPI
127
+ cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType);
128
+
129
+ cudnnStatus_t CUDNNWINAPI
130
+ cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc,
131
+ int pad_h, /* zero-padding height */
132
+ int pad_w, /* zero-padding width */
133
+ int u, /* vertical filter stride */
134
+ int v, /* horizontal filter stride */
135
+ int dilation_h, /* filter dilation in the vertical dimension */
136
+ int dilation_w, /* filter dilation in the horizontal dimension */
137
+ cudnnConvolutionMode_t mode,
138
+ cudnnDataType_t computeType);
139
+
140
+ cudnnStatus_t CUDNNWINAPI
141
+ cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc,
142
+ int *pad_h, /* zero-padding height */
143
+ int *pad_w, /* zero-padding width */
144
+ int *u, /* vertical filter stride */
145
+ int *v, /* horizontal filter stride */
146
+ int *dilation_h, /* filter dilation in the vertical dimension */
147
+ int *dilation_w, /* filter dilation in the horizontal dimension */
148
+ cudnnConvolutionMode_t *mode,
149
+ cudnnDataType_t *computeType);
150
+
151
+ cudnnStatus_t CUDNNWINAPI
152
+ cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc,
153
+ int arrayLength, /* nbDims-2 size */
154
+ const int padA[],
155
+ const int filterStrideA[],
156
+ const int dilationA[],
157
+ cudnnConvolutionMode_t mode,
158
+ cudnnDataType_t computeType); /* convolution data type */
159
+
160
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
161
+ cudnnStatus_t CUDNNWINAPI
162
+ cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc,
163
+ int arrayLengthRequested,
164
+ int *arrayLength,
165
+ int padA[],
166
+ int strideA[],
167
+ int dilationA[],
168
+ cudnnConvolutionMode_t *mode,
169
+ cudnnDataType_t *computeType); /* convolution data type */
170
+
171
+ cudnnStatus_t CUDNNWINAPI
172
+ cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
173
+ const cudnnTensorDescriptor_t inputTensorDesc,
174
+ const cudnnFilterDescriptor_t filterDesc,
175
+ int *n,
176
+ int *c,
177
+ int *h,
178
+ int *w);
179
+
180
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
181
+ cudnnStatus_t CUDNNWINAPI
182
+ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
183
+ const cudnnTensorDescriptor_t inputTensorDesc,
184
+ const cudnnFilterDescriptor_t filterDesc,
185
+ int nbDims,
186
+ int tensorOuputDimA[]);
187
+
188
+ /* helper function to provide the convolution forward algo that fit best the requirement */
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count);
191
+
192
+ cudnnStatus_t CUDNNWINAPI
193
+ cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle,
194
+ const cudnnTensorDescriptor_t srcDesc,
195
+ const cudnnFilterDescriptor_t filterDesc,
196
+ const cudnnConvolutionDescriptor_t convDesc,
197
+ const cudnnTensorDescriptor_t destDesc,
198
+ const int requestedAlgoCount,
199
+ int *returnedAlgoCount,
200
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle,
204
+ const cudnnTensorDescriptor_t xDesc,
205
+ const cudnnFilterDescriptor_t wDesc,
206
+ const cudnnConvolutionDescriptor_t convDesc,
207
+ const cudnnTensorDescriptor_t yDesc,
208
+ const int requestedAlgoCount,
209
+ int *returnedAlgoCount,
210
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
211
+
212
+ cudnnStatus_t CUDNNWINAPI
213
+ cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle,
214
+ const cudnnTensorDescriptor_t xDesc,
215
+ const void *x,
216
+ const cudnnFilterDescriptor_t wDesc,
217
+ const void *w,
218
+ const cudnnConvolutionDescriptor_t convDesc,
219
+ const cudnnTensorDescriptor_t yDesc,
220
+ void *y,
221
+ const int requestedAlgoCount,
222
+ int *returnedAlgoCount,
223
+ cudnnConvolutionFwdAlgoPerf_t *perfResults,
224
+ void *workSpace,
225
+ size_t workSpaceSizeInBytes);
226
+
227
+ cudnnStatus_t CUDNNWINAPI
228
+ cudnnIm2Col(cudnnHandle_t handle,
229
+ const cudnnTensorDescriptor_t xDesc,
230
+ const void *x,
231
+ const cudnnFilterDescriptor_t wDesc,
232
+ const cudnnConvolutionDescriptor_t convDesc,
233
+ void *colBuffer);
234
+
235
+ cudnnStatus_t CUDNNWINAPI
236
+ cudnnReorderFilterAndBias(cudnnHandle_t handle,
237
+ const cudnnFilterDescriptor_t filterDesc,
238
+ cudnnReorderType_t reorderType,
239
+ const void *filterData,
240
+ void *reorderedFilterData,
241
+ int reorderBias,
242
+ const void *biasData,
243
+ void *reorderedBiasData);
244
+
245
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
246
+ cudnnStatus_t CUDNNWINAPI
247
+ cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle,
248
+ const cudnnTensorDescriptor_t xDesc,
249
+ const cudnnFilterDescriptor_t wDesc,
250
+ const cudnnConvolutionDescriptor_t convDesc,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ cudnnConvolutionFwdAlgo_t algo,
253
+ size_t *sizeInBytes);
254
+
255
+ /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */
256
+
257
+ /* Function to perform the forward pass for batch convolution */
258
+ cudnnStatus_t CUDNNWINAPI
259
+ cudnnConvolutionForward(cudnnHandle_t handle,
260
+ const void *alpha,
261
+ const cudnnTensorDescriptor_t xDesc,
262
+ const void *x,
263
+ const cudnnFilterDescriptor_t wDesc,
264
+ const void *w,
265
+ const cudnnConvolutionDescriptor_t convDesc,
266
+ cudnnConvolutionFwdAlgo_t algo,
267
+ void *workSpace,
268
+ size_t workSpaceSizeInBytes,
269
+ const void *beta,
270
+ const cudnnTensorDescriptor_t yDesc,
271
+ void *y);
272
+
273
+ /* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */
274
+ cudnnStatus_t CUDNNWINAPI
275
+ cudnnConvolutionBiasActivationForward(cudnnHandle_t handle,
276
+ const void *alpha1,
277
+ const cudnnTensorDescriptor_t xDesc,
278
+ const void *x,
279
+ const cudnnFilterDescriptor_t wDesc,
280
+ const void *w,
281
+ const cudnnConvolutionDescriptor_t convDesc,
282
+ cudnnConvolutionFwdAlgo_t algo,
283
+ void *workSpace,
284
+ size_t workSpaceSizeInBytes,
285
+ const void *alpha2,
286
+ const cudnnTensorDescriptor_t zDesc,
287
+ const void *z,
288
+ const cudnnTensorDescriptor_t biasDesc,
289
+ const void *bias,
290
+ const cudnnActivationDescriptor_t activationDesc,
291
+ const cudnnTensorDescriptor_t yDesc,
292
+ void *y);
293
+
294
+ /* helper function to provide the convolution backward data algo that fit best the requirement */
295
+
296
+ typedef struct cudnnConvolutionBwdDataAlgoPerfStruct {
297
+ cudnnConvolutionBwdDataAlgo_t algo;
298
+ cudnnStatus_t status;
299
+ float time;
300
+ size_t memory;
301
+ cudnnDeterminism_t determinism;
302
+ cudnnMathType_t mathType;
303
+ int reserved[3];
304
+ } cudnnConvolutionBwdDataAlgoPerf_t;
305
+
306
+ cudnnStatus_t CUDNNWINAPI
307
+ cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count);
308
+
309
+ cudnnStatus_t CUDNNWINAPI
310
+ cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
311
+ const cudnnFilterDescriptor_t wDesc,
312
+ const cudnnTensorDescriptor_t dyDesc,
313
+ const cudnnConvolutionDescriptor_t convDesc,
314
+ const cudnnTensorDescriptor_t dxDesc,
315
+ const int requestedAlgoCount,
316
+ int *returnedAlgoCount,
317
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
318
+
319
+ cudnnStatus_t CUDNNWINAPI
320
+ cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle,
321
+ const cudnnFilterDescriptor_t wDesc,
322
+ const void *w,
323
+ const cudnnTensorDescriptor_t dyDesc,
324
+ const void *dy,
325
+ const cudnnConvolutionDescriptor_t convDesc,
326
+ const cudnnTensorDescriptor_t dxDesc,
327
+ void *dx,
328
+ const int requestedAlgoCount,
329
+ int *returnedAlgoCount,
330
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults,
331
+ void *workSpace,
332
+ size_t workSpaceSizeInBytes);
333
+
334
+ cudnnStatus_t CUDNNWINAPI
335
+ cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle,
336
+ const cudnnFilterDescriptor_t filterDesc,
337
+ const cudnnTensorDescriptor_t diffDesc,
338
+ const cudnnConvolutionDescriptor_t convDesc,
339
+ const cudnnTensorDescriptor_t gradDesc,
340
+ const int requestedAlgoCount,
341
+ int *returnedAlgoCount,
342
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
343
+
344
+ /*
345
+ * convolution algorithm (which requires potentially some workspace)
346
+ */
347
+
348
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
349
+ cudnnStatus_t CUDNNWINAPI
350
+ cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle,
351
+ const cudnnFilterDescriptor_t wDesc,
352
+ const cudnnTensorDescriptor_t dyDesc,
353
+ const cudnnConvolutionDescriptor_t convDesc,
354
+ const cudnnTensorDescriptor_t dxDesc,
355
+ cudnnConvolutionBwdDataAlgo_t algo,
356
+ size_t *sizeInBytes);
357
+
358
+ cudnnStatus_t CUDNNWINAPI
359
+ cudnnConvolutionBackwardData(cudnnHandle_t handle,
360
+ const void *alpha,
361
+ const cudnnFilterDescriptor_t wDesc,
362
+ const void *w,
363
+ const cudnnTensorDescriptor_t dyDesc,
364
+ const void *dy,
365
+ const cudnnConvolutionDescriptor_t convDesc,
366
+ cudnnConvolutionBwdDataAlgo_t algo,
367
+ void *workSpace,
368
+ size_t workSpaceSizeInBytes,
369
+ const void *beta,
370
+ const cudnnTensorDescriptor_t dxDesc,
371
+ void *dx);
372
+
373
+ /* Helper function to calculate folding descriptors for dgrad */
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle,
376
+ const cudnnFilterDescriptor_t filterDesc,
377
+ const cudnnTensorDescriptor_t diffDesc,
378
+ const cudnnConvolutionDescriptor_t convDesc,
379
+ const cudnnTensorDescriptor_t gradDesc,
380
+ const cudnnTensorFormat_t transformFormat,
381
+ cudnnFilterDescriptor_t foldedFilterDesc,
382
+ cudnnTensorDescriptor_t paddedDiffDesc,
383
+ cudnnConvolutionDescriptor_t foldedConvDesc,
384
+ cudnnTensorDescriptor_t foldedGradDesc,
385
+ cudnnTensorTransformDescriptor_t filterFoldTransDesc,
386
+ cudnnTensorTransformDescriptor_t diffPadTransDesc,
387
+ cudnnTensorTransformDescriptor_t gradFoldTransDesc,
388
+ cudnnTensorTransformDescriptor_t gradUnfoldTransDesc);
389
+
390
+ /* cudnnFusedOps... */
391
+ struct cudnnFusedOpsConstParamStruct;
392
+ typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t;
393
+
394
+ struct cudnnFusedOpsVariantParamStruct;
395
+ typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t;
396
+
397
+ struct cudnnFusedOpsPlanStruct;
398
+ typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t;
399
+
400
+ typedef enum {
401
+ /* each op in [ ] can be disabled by passing NULL ptr */
402
+ /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */
403
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0,
404
+ /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */
405
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1,
406
+ /* utility for BN training in BN-conv fusion */
407
+ /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */
408
+ /* optionally update running stats and generate saved stats */
409
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2,
410
+ /* utility for BN inference in BN-conv fusion */
411
+ /* computes the equivalent scale and bias from learned running stats and learned scale, bias */
412
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3,
413
+ /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */
414
+ CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4,
415
+ /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */
416
+ CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5,
417
+ /* reserved for future use */
418
+ CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6,
419
+ } cudnnFusedOps_t;
420
+
421
+ typedef enum {
422
+ /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */
423
+ /* get XDESC: pass previously created cudnnTensorDescriptor_t */
424
+ CUDNN_PARAM_XDESC = 0,
425
+ /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
426
+ CUDNN_PARAM_XDATA_PLACEHOLDER = 1,
427
+ /* set/get BN_MODE: pass cudnnBatchNormMode_t* */
428
+ CUDNN_PARAM_BN_MODE = 2,
429
+ /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
430
+ /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
431
+ CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3,
432
+ /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
433
+ CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4,
434
+ /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
435
+ CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5,
436
+ /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */
437
+ /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */
438
+ CUDNN_PARAM_ACTIVATION_DESC = 6,
439
+ /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */
440
+ /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */
441
+ CUDNN_PARAM_CONV_DESC = 7,
442
+ /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */
443
+ /* get WDESC: pass previously created cudnnFilterDescriptor_t */
444
+ CUDNN_PARAM_WDESC = 8,
445
+ /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
446
+ CUDNN_PARAM_WDATA_PLACEHOLDER = 9,
447
+ /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */
448
+ /* get DWDESC: pass previously created cudnnFilterDescriptor_t */
449
+ CUDNN_PARAM_DWDESC = 10,
450
+ /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
451
+ CUDNN_PARAM_DWDATA_PLACEHOLDER = 11,
452
+ /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */
453
+ /* get YDESC: pass previously created cudnnTensorDescriptor_t */
454
+ CUDNN_PARAM_YDESC = 12,
455
+ /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
456
+ CUDNN_PARAM_YDATA_PLACEHOLDER = 13,
457
+ /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */
458
+ /* get DYDESC: pass previously created cudnnTensorDescriptor_t */
459
+ CUDNN_PARAM_DYDESC = 14,
460
+ /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
461
+ CUDNN_PARAM_DYDATA_PLACEHOLDER = 15,
462
+ /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */
463
+ /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */
464
+ CUDNN_PARAM_YSTATS_DESC = 16,
465
+ /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
466
+ CUDNN_PARAM_YSUM_PLACEHOLDER = 17,
467
+ /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
468
+ CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18,
469
+ /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */
470
+ /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */
471
+ CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19,
472
+ /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
473
+ CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20,
474
+ /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
475
+ CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21,
476
+ /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
477
+ CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22,
478
+ /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
479
+ CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23,
480
+ /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
481
+ CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24,
482
+ /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
483
+ CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25,
484
+
485
+ /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */
486
+ /* get ZDESC: pass previously created cudnnTensorDescriptor_t */
487
+ CUDNN_PARAM_ZDESC = 26,
488
+ /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
489
+ CUDNN_PARAM_ZDATA_PLACEHOLDER = 27,
490
+ /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
491
+ /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
492
+ CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28,
493
+ /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
494
+ CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29,
495
+ /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
496
+ CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30,
497
+
498
+ /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */
499
+ /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */
500
+ CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31,
501
+ /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
502
+ CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32,
503
+
504
+ /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */
505
+ /* get DXDESC: pass previously created cudnnTensorDescriptor_t */
506
+ CUDNN_PARAM_DXDESC = 33,
507
+ /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
508
+ CUDNN_PARAM_DXDATA_PLACEHOLDER = 34,
509
+ /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */
510
+ /* get DZDESC: pass previously created cudnnTensorDescriptor_t */
511
+ CUDNN_PARAM_DZDESC = 35,
512
+ /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
513
+ CUDNN_PARAM_DZDATA_PLACEHOLDER = 36,
514
+ /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
515
+ CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37,
516
+ /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
517
+ CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38,
518
+ } cudnnFusedOpsConstParamLabel_t;
519
+
520
+ typedef enum {
521
+ CUDNN_PTR_NULL = 0,
522
+ CUDNN_PTR_ELEM_ALIGNED = 1,
523
+ CUDNN_PTR_16B_ALIGNED = 2,
524
+ } cudnnFusedOpsPointerPlaceHolder_t;
525
+
526
+ typedef enum {
527
+ /* set: pass void* pointing to dev memory */
528
+ /* get: pass void** pointing to host memory */
529
+ CUDNN_PTR_XDATA = 0,
530
+ CUDNN_PTR_BN_EQSCALE = 1,
531
+ CUDNN_PTR_BN_EQBIAS = 2,
532
+ CUDNN_PTR_WDATA = 3,
533
+ CUDNN_PTR_DWDATA = 4,
534
+ CUDNN_PTR_YDATA = 5,
535
+ CUDNN_PTR_DYDATA = 6,
536
+ CUDNN_PTR_YSUM = 7,
537
+ CUDNN_PTR_YSQSUM = 8,
538
+ CUDNN_PTR_WORKSPACE = 9,
539
+ CUDNN_PTR_BN_SCALE = 10,
540
+ CUDNN_PTR_BN_BIAS = 11,
541
+ CUDNN_PTR_BN_SAVED_MEAN = 12,
542
+ CUDNN_PTR_BN_SAVED_INVSTD = 13,
543
+ CUDNN_PTR_BN_RUNNING_MEAN = 14,
544
+ CUDNN_PTR_BN_RUNNING_VAR = 15,
545
+ CUDNN_PTR_ZDATA = 16,
546
+ CUDNN_PTR_BN_Z_EQSCALE = 17,
547
+ CUDNN_PTR_BN_Z_EQBIAS = 18,
548
+ CUDNN_PTR_ACTIVATION_BITMASK = 19,
549
+ CUDNN_PTR_DXDATA = 20,
550
+ CUDNN_PTR_DZDATA = 21,
551
+ CUDNN_PTR_BN_DSCALE = 22,
552
+ CUDNN_PTR_BN_DBIAS = 23,
553
+
554
+ /* set/get: pass size_t* pointing to host memory */
555
+ CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100,
556
+ /* set/get: pass int64_t* pointing to host memory */
557
+ CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101,
558
+ /* set/get: pass double* pointing to host memory */
559
+ CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102,
560
+ /* set/get: pass double* pointing to host memory */
561
+ CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103,
562
+ } cudnnFusedOpsVariantParamLabel_t;
563
+
564
+ cudnnStatus_t CUDNNWINAPI
565
+ cudnnCnnInferVersionCheck(void);
566
+
567
+ #if defined(__cplusplus)
568
+ }
569
+ #endif
570
+
571
+ #endif /* CUDNN_CNN_INFER_H_ */
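
The declarations above cover the legacy (pre-graph) convolution inference path. As a hedged sketch of how they compose, the following configures a 2-D cross-correlation, derives the output shape, sizes the workspace for one algorithm, and runs cudnnConvolutionForward. The tensor and filter descriptor calls (cudnnSetTensor4dDescriptor, cudnnSetFilter4dDescriptor) come from cudnn_ops_infer.h, which this header includes; the shapes and the fixed IMPLICIT_GEMM algorithm choice are illustrative, and error checking is omitted for brevity.

```c
/* Illustrative sketch: one float NCHW convolution forward pass.
 * Device buffers are allocated but not initialized with real data. */
#include <cudnn.h>
#include <cuda_runtime.h>
#include <stdio.h>

int main(void) {
    cudnnHandle_t handle;
    cudnnCreate(&handle);

    /* x: 1x3x224x224, w: 64x3x7x7, pad 3, stride 2, dilation 1 (illustrative). */
    cudnnTensorDescriptor_t xDesc, yDesc;
    cudnnFilterDescriptor_t wDesc;
    cudnnConvolutionDescriptor_t convDesc;
    cudnnCreateTensorDescriptor(&xDesc);
    cudnnCreateTensorDescriptor(&yDesc);
    cudnnCreateFilterDescriptor(&wDesc);
    cudnnCreateConvolutionDescriptor(&convDesc);

    cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 3, 224, 224);
    cudnnSetFilter4dDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 3, 7, 7);
    cudnnSetConvolution2dDescriptor(convDesc, 3, 3, 2, 2, 1, 1,
                                    CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);

    /* Derive the output shape from the input/filter/convolution descriptors. */
    int n, c, h, w;
    cudnnGetConvolution2dForwardOutputDim(convDesc, xDesc, wDesc, &n, &c, &h, &w);
    cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w);

    cudnnConvolutionFwdAlgo_t algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
    size_t wsSize = 0;
    cudnnGetConvolutionForwardWorkspaceSize(handle, xDesc, wDesc, convDesc, yDesc,
                                            algo, &wsSize);

    void *x, *wData, *y, *workspace = NULL;
    cudaMalloc(&x, sizeof(float) * 1 * 3 * 224 * 224);
    cudaMalloc(&wData, sizeof(float) * 64 * 3 * 7 * 7);
    cudaMalloc(&y, sizeof(float) * (size_t)n * c * h * w);
    if (wsSize > 0) cudaMalloc(&workspace, wsSize);

    const float alpha = 1.0f, beta = 0.0f;  /* y = alpha * conv(x, w) + beta * y */
    cudnnStatus_t s = cudnnConvolutionForward(handle, &alpha, xDesc, x, wDesc, wData,
                                              convDesc, algo, workspace, wsSize,
                                              &beta, yDesc, y);
    printf("cudnnConvolutionForward: %s\n", cudnnGetErrorString(s));

    cudaFree(x); cudaFree(wData); cudaFree(y); cudaFree(workspace);
    cudnnDestroyConvolutionDescriptor(convDesc);
    cudnnDestroyFilterDescriptor(wDesc);
    cudnnDestroyTensorDescriptor(yDesc);
    cudnnDestroyTensorDescriptor(xDesc);
    cudnnDestroy(handle);
    return 0;
}
```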
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h ADDED
@@ -0,0 +1,571 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #if !defined(CUDNN_CNN_INFER_H_)
55
+ #define CUDNN_CNN_INFER_H_
56
+
57
+ #pragma once
58
+ #include <cuda_runtime.h>
59
+ #include <stdint.h>
60
+
61
+ #include "cudnn_version.h"
62
+ #include "cudnn_ops_infer.h"
63
+
64
+ /* These version numbers are autogenerated, do not edit manually. */
65
+ #define CUDNN_CNN_INFER_MAJOR 8
66
+ #define CUDNN_CNN_INFER_MINOR 9
67
+ #define CUDNN_CNN_INFER_PATCH 2
68
+
69
+ #if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \
70
+ (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL)
71
+ #error Version mismatch in cuDNN CNN INFER!!!
72
+ #endif
73
+
74
+ #if defined(__cplusplus)
75
+ extern "C" {
76
+ #endif
77
+
78
+ typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t;
79
+
80
+ /*
81
+ * convolution mode
82
+ */
83
+ typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t;
84
+
85
+ /*
86
+ * CUDNN Reorder
87
+ */
88
+ typedef enum {
89
+ CUDNN_DEFAULT_REORDER = 0,
90
+ CUDNN_NO_REORDER = 1,
91
+ } cudnnReorderType_t;
92
+
93
+ typedef struct cudnnConvolutionFwdAlgoPerfStruct {
94
+ cudnnConvolutionFwdAlgo_t algo;
95
+ cudnnStatus_t status;
96
+ float time;
97
+ size_t memory;
98
+ cudnnDeterminism_t determinism;
99
+ cudnnMathType_t mathType;
100
+ int reserved[3];
101
+ } cudnnConvolutionFwdAlgoPerf_t;
102
+
103
+ /* Create an instance of convolution descriptor */
104
+ cudnnStatus_t CUDNNWINAPI
105
+ cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc);
106
+
107
+ /* Destroy an instance of convolution descriptor */
108
+ cudnnStatus_t CUDNNWINAPI
109
+ cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc);
110
+
111
+ cudnnStatus_t CUDNNWINAPI
112
+ cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType);
113
+
114
+ cudnnStatus_t CUDNNWINAPI
115
+ cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount);
119
+
120
+ cudnnStatus_t CUDNNWINAPI
121
+ cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount);
122
+
123
+ cudnnStatus_t CUDNNWINAPI
124
+ cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType);
125
+
126
+ cudnnStatus_t CUDNNWINAPI
127
+ cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType);
128
+
129
+ cudnnStatus_t CUDNNWINAPI
130
+ cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc,
131
+ int pad_h, /* zero-padding height */
132
+ int pad_w, /* zero-padding width */
133
+ int u, /* vertical filter stride */
134
+ int v, /* horizontal filter stride */
135
+ int dilation_h, /* filter dilation in the vertical dimension */
136
+ int dilation_w, /* filter dilation in the horizontal dimension */
137
+ cudnnConvolutionMode_t mode,
138
+ cudnnDataType_t computeType);
139
+
140
+ cudnnStatus_t CUDNNWINAPI
141
+ cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc,
142
+ int *pad_h, /* zero-padding height */
143
+ int *pad_w, /* zero-padding width */
144
+ int *u, /* vertical filter stride */
145
+ int *v, /* horizontal filter stride */
146
+ int *dilation_h, /* filter dilation in the vertical dimension */
147
+ int *dilation_w, /* filter dilation in the horizontal dimension */
148
+ cudnnConvolutionMode_t *mode,
149
+ cudnnDataType_t *computeType);
150
+
151
+ cudnnStatus_t CUDNNWINAPI
152
+ cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc,
153
+ int arrayLength, /* nbDims-2 size */
154
+ const int padA[],
155
+ const int filterStrideA[],
156
+ const int dilationA[],
157
+ cudnnConvolutionMode_t mode,
158
+ cudnnDataType_t computeType); /* convolution data type */
159
+
160
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
161
+ cudnnStatus_t CUDNNWINAPI
162
+ cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc,
163
+ int arrayLengthRequested,
164
+ int *arrayLength,
165
+ int padA[],
166
+ int strideA[],
167
+ int dilationA[],
168
+ cudnnConvolutionMode_t *mode,
169
+ cudnnDataType_t *computeType); /* convolution data type */
170
+
171
+ cudnnStatus_t CUDNNWINAPI
172
+ cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
173
+ const cudnnTensorDescriptor_t inputTensorDesc,
174
+ const cudnnFilterDescriptor_t filterDesc,
175
+ int *n,
176
+ int *c,
177
+ int *h,
178
+ int *w);
179
+
180
+ /* Helper function to return the dimensions of the output tensor given a convolution descriptor */
181
+ cudnnStatus_t CUDNNWINAPI
182
+ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc,
183
+ const cudnnTensorDescriptor_t inputTensorDesc,
184
+ const cudnnFilterDescriptor_t filterDesc,
185
+ int nbDims,
186
+ int tensorOuputDimA[]);
187
+
188
+ /* helper function to provide the convolution forward algo that fit best the requirement */
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count);
191
+
192
+ cudnnStatus_t CUDNNWINAPI
193
+ cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle,
194
+ const cudnnTensorDescriptor_t srcDesc,
195
+ const cudnnFilterDescriptor_t filterDesc,
196
+ const cudnnConvolutionDescriptor_t convDesc,
197
+ const cudnnTensorDescriptor_t destDesc,
198
+ const int requestedAlgoCount,
199
+ int *returnedAlgoCount,
200
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle,
204
+ const cudnnTensorDescriptor_t xDesc,
205
+ const cudnnFilterDescriptor_t wDesc,
206
+ const cudnnConvolutionDescriptor_t convDesc,
207
+ const cudnnTensorDescriptor_t yDesc,
208
+ const int requestedAlgoCount,
209
+ int *returnedAlgoCount,
210
+ cudnnConvolutionFwdAlgoPerf_t *perfResults);
211
+
212
+ cudnnStatus_t CUDNNWINAPI
213
+ cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle,
214
+ const cudnnTensorDescriptor_t xDesc,
215
+ const void *x,
216
+ const cudnnFilterDescriptor_t wDesc,
217
+ const void *w,
218
+ const cudnnConvolutionDescriptor_t convDesc,
219
+ const cudnnTensorDescriptor_t yDesc,
220
+ void *y,
221
+ const int requestedAlgoCount,
222
+ int *returnedAlgoCount,
223
+ cudnnConvolutionFwdAlgoPerf_t *perfResults,
224
+ void *workSpace,
225
+ size_t workSpaceSizeInBytes);
226
+
227
+ cudnnStatus_t CUDNNWINAPI
228
+ cudnnIm2Col(cudnnHandle_t handle,
229
+ const cudnnTensorDescriptor_t xDesc,
230
+ const void *x,
231
+ const cudnnFilterDescriptor_t wDesc,
232
+ const cudnnConvolutionDescriptor_t convDesc,
233
+ void *colBuffer);
234
+
235
+ cudnnStatus_t CUDNNWINAPI
236
+ cudnnReorderFilterAndBias(cudnnHandle_t handle,
237
+ const cudnnFilterDescriptor_t filterDesc,
238
+ cudnnReorderType_t reorderType,
239
+ const void *filterData,
240
+ void *reorderedFilterData,
241
+ int reorderBias,
242
+ const void *biasData,
243
+ void *reorderedBiasData);
244
+
245
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
246
+ cudnnStatus_t CUDNNWINAPI
247
+ cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle,
248
+ const cudnnTensorDescriptor_t xDesc,
249
+ const cudnnFilterDescriptor_t wDesc,
250
+ const cudnnConvolutionDescriptor_t convDesc,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ cudnnConvolutionFwdAlgo_t algo,
253
+ size_t *sizeInBytes);
254
+
255
+ /* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */
256
+
257
+ /* Function to perform the forward pass for batch convolution */
258
+ cudnnStatus_t CUDNNWINAPI
259
+ cudnnConvolutionForward(cudnnHandle_t handle,
260
+ const void *alpha,
261
+ const cudnnTensorDescriptor_t xDesc,
262
+ const void *x,
263
+ const cudnnFilterDescriptor_t wDesc,
264
+ const void *w,
265
+ const cudnnConvolutionDescriptor_t convDesc,
266
+ cudnnConvolutionFwdAlgo_t algo,
267
+ void *workSpace,
268
+ size_t workSpaceSizeInBytes,
269
+ const void *beta,
270
+ const cudnnTensorDescriptor_t yDesc,
271
+ void *y);
272
+
273
+ /* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */
274
+ cudnnStatus_t CUDNNWINAPI
275
+ cudnnConvolutionBiasActivationForward(cudnnHandle_t handle,
276
+ const void *alpha1,
277
+ const cudnnTensorDescriptor_t xDesc,
278
+ const void *x,
279
+ const cudnnFilterDescriptor_t wDesc,
280
+ const void *w,
281
+ const cudnnConvolutionDescriptor_t convDesc,
282
+ cudnnConvolutionFwdAlgo_t algo,
283
+ void *workSpace,
284
+ size_t workSpaceSizeInBytes,
285
+ const void *alpha2,
286
+ const cudnnTensorDescriptor_t zDesc,
287
+ const void *z,
288
+ const cudnnTensorDescriptor_t biasDesc,
289
+ const void *bias,
290
+ const cudnnActivationDescriptor_t activationDesc,
291
+ const cudnnTensorDescriptor_t yDesc,
292
+ void *y);
293
+
294
+ /* helper function to provide the convolution backward data algo that fit best the requirement */
295
+
296
+ typedef struct cudnnConvolutionBwdDataAlgoPerfStruct {
297
+ cudnnConvolutionBwdDataAlgo_t algo;
298
+ cudnnStatus_t status;
299
+ float time;
300
+ size_t memory;
301
+ cudnnDeterminism_t determinism;
302
+ cudnnMathType_t mathType;
303
+ int reserved[3];
304
+ } cudnnConvolutionBwdDataAlgoPerf_t;
305
+
306
+ cudnnStatus_t CUDNNWINAPI
307
+ cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count);
308
+
309
+ cudnnStatus_t CUDNNWINAPI
310
+ cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle,
311
+ const cudnnFilterDescriptor_t wDesc,
312
+ const cudnnTensorDescriptor_t dyDesc,
313
+ const cudnnConvolutionDescriptor_t convDesc,
314
+ const cudnnTensorDescriptor_t dxDesc,
315
+ const int requestedAlgoCount,
316
+ int *returnedAlgoCount,
317
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
318
+
319
+ cudnnStatus_t CUDNNWINAPI
320
+ cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle,
321
+ const cudnnFilterDescriptor_t wDesc,
322
+ const void *w,
323
+ const cudnnTensorDescriptor_t dyDesc,
324
+ const void *dy,
325
+ const cudnnConvolutionDescriptor_t convDesc,
326
+ const cudnnTensorDescriptor_t dxDesc,
327
+ void *dx,
328
+ const int requestedAlgoCount,
329
+ int *returnedAlgoCount,
330
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults,
331
+ void *workSpace,
332
+ size_t workSpaceSizeInBytes);
333
+
334
+ cudnnStatus_t CUDNNWINAPI
335
+ cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle,
336
+ const cudnnFilterDescriptor_t filterDesc,
337
+ const cudnnTensorDescriptor_t diffDesc,
338
+ const cudnnConvolutionDescriptor_t convDesc,
339
+ const cudnnTensorDescriptor_t gradDesc,
340
+ const int requestedAlgoCount,
341
+ int *returnedAlgoCount,
342
+ cudnnConvolutionBwdDataAlgoPerf_t *perfResults);
343
+
344
+ /*
345
+ * convolution algorithm (which requires potentially some workspace)
346
+ */
347
+
348
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
349
+ cudnnStatus_t CUDNNWINAPI
350
+ cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle,
351
+ const cudnnFilterDescriptor_t wDesc,
352
+ const cudnnTensorDescriptor_t dyDesc,
353
+ const cudnnConvolutionDescriptor_t convDesc,
354
+ const cudnnTensorDescriptor_t dxDesc,
355
+ cudnnConvolutionBwdDataAlgo_t algo,
356
+ size_t *sizeInBytes);
357
+
358
+ cudnnStatus_t CUDNNWINAPI
359
+ cudnnConvolutionBackwardData(cudnnHandle_t handle,
360
+ const void *alpha,
361
+ const cudnnFilterDescriptor_t wDesc,
362
+ const void *w,
363
+ const cudnnTensorDescriptor_t dyDesc,
364
+ const void *dy,
365
+ const cudnnConvolutionDescriptor_t convDesc,
366
+ cudnnConvolutionBwdDataAlgo_t algo,
367
+ void *workSpace,
368
+ size_t workSpaceSizeInBytes,
369
+ const void *beta,
370
+ const cudnnTensorDescriptor_t dxDesc,
371
+ void *dx);
372
+
373
+ /* Helper function to calculate folding descriptors for dgrad */
374
+ cudnnStatus_t CUDNNWINAPI
375
+ cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle,
376
+ const cudnnFilterDescriptor_t filterDesc,
377
+ const cudnnTensorDescriptor_t diffDesc,
378
+ const cudnnConvolutionDescriptor_t convDesc,
379
+ const cudnnTensorDescriptor_t gradDesc,
380
+ const cudnnTensorFormat_t transformFormat,
381
+ cudnnFilterDescriptor_t foldedFilterDesc,
382
+ cudnnTensorDescriptor_t paddedDiffDesc,
383
+ cudnnConvolutionDescriptor_t foldedConvDesc,
384
+ cudnnTensorDescriptor_t foldedGradDesc,
385
+ cudnnTensorTransformDescriptor_t filterFoldTransDesc,
386
+ cudnnTensorTransformDescriptor_t diffPadTransDesc,
387
+ cudnnTensorTransformDescriptor_t gradFoldTransDesc,
388
+ cudnnTensorTransformDescriptor_t gradUnfoldTransDesc);
389
+
390
+ /* cudnnFusedOps... */
391
+ struct cudnnFusedOpsConstParamStruct;
392
+ typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t;
393
+
394
+ struct cudnnFusedOpsVariantParamStruct;
395
+ typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t;
396
+
397
+ struct cudnnFusedOpsPlanStruct;
398
+ typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t;
399
+
400
+ typedef enum {
401
+ /* each op in [ ] can be disabled by passing NULL ptr */
402
+ /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */
403
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0,
404
+ /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */
405
+ CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1,
406
+ /* utility for BN training in BN-conv fusion */
407
+ /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */
408
+ /* optionally update running stats and generate saved stats */
409
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2,
410
+ /* utility for BN inference in BN-conv fusion */
411
+ /* computes the equivalent scale and bias from learned running stats and learned scale, bias */
412
+ CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3,
413
+ /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */
414
+ CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4,
415
+ /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */
416
+ CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5,
417
+ /* reserved for future use */
418
+ CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6,
419
+ } cudnnFusedOps_t;
420
+
421
+ typedef enum {
422
+ /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */
423
+ /* get XDESC: pass previously created cudnnTensorDescriptor_t */
424
+ CUDNN_PARAM_XDESC = 0,
425
+ /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
426
+ CUDNN_PARAM_XDATA_PLACEHOLDER = 1,
427
+ /* set/get BN_MODE: pass cudnnBatchNormMode_t* */
428
+ CUDNN_PARAM_BN_MODE = 2,
429
+ /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
430
+ /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
431
+ CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3,
432
+ /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
433
+ CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4,
434
+ /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
435
+ CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5,
436
+ /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */
437
+ /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */
438
+ CUDNN_PARAM_ACTIVATION_DESC = 6,
439
+ /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */
440
+ /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */
441
+ CUDNN_PARAM_CONV_DESC = 7,
442
+ /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */
443
+ /* get WDESC: pass previously created cudnnFilterDescriptor_t */
444
+ CUDNN_PARAM_WDESC = 8,
445
+ /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
446
+ CUDNN_PARAM_WDATA_PLACEHOLDER = 9,
447
+ /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */
448
+ /* get DWDESC: pass previously created cudnnFilterDescriptor_t */
449
+ CUDNN_PARAM_DWDESC = 10,
450
+ /* set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
451
+ CUDNN_PARAM_DWDATA_PLACEHOLDER = 11,
452
+ /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */
453
+ /* get YDESC: pass previously created cudnnTensorDescriptor_t */
454
+ CUDNN_PARAM_YDESC = 12,
455
+ /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
456
+ CUDNN_PARAM_YDATA_PLACEHOLDER = 13,
457
+ /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */
458
+ /* get DYDESC: pass previously created cudnnTensorDescriptor_t */
459
+ CUDNN_PARAM_DYDESC = 14,
460
+ /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
461
+ CUDNN_PARAM_DYDATA_PLACEHOLDER = 15,
462
+ /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */
463
+ /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */
464
+ CUDNN_PARAM_YSTATS_DESC = 16,
465
+ /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
466
+ CUDNN_PARAM_YSUM_PLACEHOLDER = 17,
467
+ /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
468
+ CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18,
469
+ /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */
470
+ /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */
471
+ CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19,
472
+ /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
473
+ CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20,
474
+ /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
475
+ CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21,
476
+ /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
477
+ CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22,
478
+ /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
479
+ CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23,
480
+ /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
481
+ CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24,
482
+ /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
483
+ CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25,
484
+
485
+ /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */
486
+ /* get ZDESC: pass previously created cudnnTensorDescriptor_t */
487
+ CUDNN_PARAM_ZDESC = 26,
488
+ /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
489
+ CUDNN_PARAM_ZDATA_PLACEHOLDER = 27,
490
+ /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */
491
+ /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */
492
+ CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28,
493
+ /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
494
+ CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29,
495
+ /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
496
+ CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30,
497
+
498
+ /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */
499
+ /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */
500
+ CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31,
501
+ /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
502
+ CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32,
503
+
504
+ /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */
505
+ /* get DXDESC: pass previously created cudnnTensorDescriptor_t */
506
+ CUDNN_PARAM_DXDESC = 33,
507
+ /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
508
+ CUDNN_PARAM_DXDATA_PLACEHOLDER = 34,
509
+ /* set DZDESC: pass previously initialized cudnnTensorDescriptor_t */
510
+ /* get DZDESC: pass previously created cudnnTensorDescriptor_t */
511
+ CUDNN_PARAM_DZDESC = 35,
512
+ /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
513
+ CUDNN_PARAM_DZDATA_PLACEHOLDER = 36,
514
+ /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
515
+ CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37,
516
+ /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */
517
+ CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38,
518
+ } cudnnFusedOpsConstParamLabel_t;
519
+
520
+ typedef enum {
521
+ CUDNN_PTR_NULL = 0,
522
+ CUDNN_PTR_ELEM_ALIGNED = 1,
523
+ CUDNN_PTR_16B_ALIGNED = 2,
524
+ } cudnnFusedOpsPointerPlaceHolder_t;
525
+
526
+ typedef enum {
527
+ /* set: pass void* pointing to dev memory */
528
+ /* get: pass void** pointing to host memory */
529
+ CUDNN_PTR_XDATA = 0,
530
+ CUDNN_PTR_BN_EQSCALE = 1,
531
+ CUDNN_PTR_BN_EQBIAS = 2,
532
+ CUDNN_PTR_WDATA = 3,
533
+ CUDNN_PTR_DWDATA = 4,
534
+ CUDNN_PTR_YDATA = 5,
535
+ CUDNN_PTR_DYDATA = 6,
536
+ CUDNN_PTR_YSUM = 7,
537
+ CUDNN_PTR_YSQSUM = 8,
538
+ CUDNN_PTR_WORKSPACE = 9,
539
+ CUDNN_PTR_BN_SCALE = 10,
540
+ CUDNN_PTR_BN_BIAS = 11,
541
+ CUDNN_PTR_BN_SAVED_MEAN = 12,
542
+ CUDNN_PTR_BN_SAVED_INVSTD = 13,
543
+ CUDNN_PTR_BN_RUNNING_MEAN = 14,
544
+ CUDNN_PTR_BN_RUNNING_VAR = 15,
545
+ CUDNN_PTR_ZDATA = 16,
546
+ CUDNN_PTR_BN_Z_EQSCALE = 17,
547
+ CUDNN_PTR_BN_Z_EQBIAS = 18,
548
+ CUDNN_PTR_ACTIVATION_BITMASK = 19,
549
+ CUDNN_PTR_DXDATA = 20,
550
+ CUDNN_PTR_DZDATA = 21,
551
+ CUDNN_PTR_BN_DSCALE = 22,
552
+ CUDNN_PTR_BN_DBIAS = 23,
553
+
554
+ /* set/get: pass size_t* pointing to host memory */
555
+ CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100,
556
+ /* set/get: pass int64_t* pointing to host memory */
557
+ CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101,
558
+ /* set/get: pass double* pointing to host memory */
559
+ CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102,
560
+ /* set/get: pass double* pointing to host memory */
561
+ CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103,
562
+ } cudnnFusedOpsVariantParamLabel_t;
563
+
564
+ cudnnStatus_t CUDNNWINAPI
565
+ cudnnCnnInferVersionCheck(void);
566
+
567
+ #if defined(__cplusplus)
568
+ }
569
+ #endif
570
+
571
+ #endif /* CUDNN_CNN_INFER_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #pragma once
55
+ #include <cuda_runtime.h>
56
+ #include <stdint.h>
57
+
58
+ #include "cudnn_version.h"
59
+ #include "cudnn_ops_infer.h"
60
+ #include "cudnn_ops_train.h"
61
+ #include "cudnn_cnn_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_CNN_TRAIN_MAJOR 8
65
+ #define CUDNN_CNN_TRAIN_MINOR 9
66
+ #define CUDNN_CNN_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN CNN INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* helper function to provide the convolution backward filter algo that fit best the requirement */
78
+
79
+ typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct {
80
+ cudnnConvolutionBwdFilterAlgo_t algo;
81
+ cudnnStatus_t status;
82
+ float time;
83
+ size_t memory;
84
+ cudnnDeterminism_t determinism;
85
+ cudnnMathType_t mathType;
86
+ int reserved[3];
87
+ } cudnnConvolutionBwdFilterAlgoPerf_t;
88
+
89
+ cudnnStatus_t CUDNNWINAPI
90
+ cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count);
91
+
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
94
+ const cudnnTensorDescriptor_t xDesc,
95
+ const cudnnTensorDescriptor_t dyDesc,
96
+ const cudnnConvolutionDescriptor_t convDesc,
97
+ const cudnnFilterDescriptor_t dwDesc,
98
+ const int requestedAlgoCount,
99
+ int *returnedAlgoCount,
100
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
101
+
102
+ cudnnStatus_t CUDNNWINAPI
103
+ cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle,
104
+ const cudnnTensorDescriptor_t xDesc,
105
+ const void *x,
106
+ const cudnnTensorDescriptor_t dyDesc,
107
+ const void *y,
108
+ const cudnnConvolutionDescriptor_t convDesc,
109
+ const cudnnFilterDescriptor_t dwDesc,
110
+ void *dw,
111
+ const int requestedAlgoCount,
112
+ int *returnedAlgoCount,
113
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults,
114
+ void *workSpace,
115
+ size_t workSpaceSizeInBytes);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle,
119
+ const cudnnTensorDescriptor_t srcDesc,
120
+ const cudnnTensorDescriptor_t diffDesc,
121
+ const cudnnConvolutionDescriptor_t convDesc,
122
+ const cudnnFilterDescriptor_t gradDesc,
123
+ const int requestedAlgoCount,
124
+ int *returnedAlgoCount,
125
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
126
+
127
+ /*
128
+ * convolution algorithm (which requires potentially some workspace)
129
+ */
130
+
131
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
132
+ cudnnStatus_t CUDNNWINAPI
133
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle,
134
+ const cudnnTensorDescriptor_t xDesc,
135
+ const cudnnTensorDescriptor_t dyDesc,
136
+ const cudnnConvolutionDescriptor_t convDesc,
137
+ const cudnnFilterDescriptor_t gradDesc,
138
+ cudnnConvolutionBwdFilterAlgo_t algo,
139
+ size_t *sizeInBytes);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnConvolutionBackwardFilter(cudnnHandle_t handle,
143
+ const void *alpha,
144
+ const cudnnTensorDescriptor_t xDesc,
145
+ const void *x,
146
+ const cudnnTensorDescriptor_t dyDesc,
147
+ const void *dy,
148
+ const cudnnConvolutionDescriptor_t convDesc,
149
+ cudnnConvolutionBwdFilterAlgo_t algo,
150
+ void *workSpace,
151
+ size_t workSpaceSizeInBytes,
152
+ const void *beta,
153
+ const cudnnFilterDescriptor_t dwDesc,
154
+ void *dw);
155
+
156
+ /* Function to compute the bias gradient for batch convolution */
157
+ cudnnStatus_t CUDNNWINAPI
158
+ cudnnConvolutionBackwardBias(cudnnHandle_t handle,
159
+ const void *alpha,
160
+ const cudnnTensorDescriptor_t dyDesc,
161
+ const void *dy,
162
+ const void *beta,
163
+ const cudnnTensorDescriptor_t dbDesc,
164
+ void *db);
165
+
166
+ cudnnStatus_t CUDNNWINAPI
167
+ cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops);
168
+
169
+ cudnnStatus_t CUDNNWINAPI
170
+ cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack);
171
+
172
+ cudnnStatus_t CUDNNWINAPI
173
+ cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack,
174
+ cudnnFusedOpsConstParamLabel_t paramLabel,
175
+ const void *param);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack,
179
+ cudnnFusedOpsConstParamLabel_t paramLabel,
180
+ void *param,
181
+ int *isNULL);
182
+
183
+ cudnnStatus_t CUDNNWINAPI
184
+ cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops);
185
+
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack);
188
+
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack,
191
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
192
+ void *ptr);
193
+
194
+ cudnnStatus_t CUDNNWINAPI
195
+ cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack,
196
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
197
+ void *ptr);
198
+
199
+ cudnnStatus_t CUDNNWINAPI
200
+ cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan);
204
+
205
+ cudnnStatus_t CUDNNWINAPI
206
+ cudnnMakeFusedOpsPlan(cudnnHandle_t handle,
207
+ cudnnFusedOpsPlan_t plan,
208
+ const cudnnFusedOpsConstParamPack_t constPack,
209
+ size_t *workspaceSizeInBytes);
210
+
211
+ cudnnStatus_t CUDNNWINAPI
212
+ cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack);
213
+
214
+ cudnnStatus_t CUDNNWINAPI
215
+ cudnnCnnTrainVersionCheck(void);
216
+
217
+ #if defined(__cplusplus)
218
+ }
219
+ #endif
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions.
52
+ */
53
+
54
+ #pragma once
55
+ #include <cuda_runtime.h>
56
+ #include <stdint.h>
57
+
58
+ #include "cudnn_version.h"
59
+ #include "cudnn_ops_infer.h"
60
+ #include "cudnn_ops_train.h"
61
+ #include "cudnn_cnn_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_CNN_TRAIN_MAJOR 8
65
+ #define CUDNN_CNN_TRAIN_MINOR 9
66
+ #define CUDNN_CNN_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN CNN INFER!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* helper function to provide the convolution backward filter algo that fit best the requirement */
78
+
79
+ typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct {
80
+ cudnnConvolutionBwdFilterAlgo_t algo;
81
+ cudnnStatus_t status;
82
+ float time;
83
+ size_t memory;
84
+ cudnnDeterminism_t determinism;
85
+ cudnnMathType_t mathType;
86
+ int reserved[3];
87
+ } cudnnConvolutionBwdFilterAlgoPerf_t;
88
+
89
+ cudnnStatus_t CUDNNWINAPI
90
+ cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count);
91
+
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle,
94
+ const cudnnTensorDescriptor_t xDesc,
95
+ const cudnnTensorDescriptor_t dyDesc,
96
+ const cudnnConvolutionDescriptor_t convDesc,
97
+ const cudnnFilterDescriptor_t dwDesc,
98
+ const int requestedAlgoCount,
99
+ int *returnedAlgoCount,
100
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
101
+
102
+ cudnnStatus_t CUDNNWINAPI
103
+ cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle,
104
+ const cudnnTensorDescriptor_t xDesc,
105
+ const void *x,
106
+ const cudnnTensorDescriptor_t dyDesc,
107
+ const void *y,
108
+ const cudnnConvolutionDescriptor_t convDesc,
109
+ const cudnnFilterDescriptor_t dwDesc,
110
+ void *dw,
111
+ const int requestedAlgoCount,
112
+ int *returnedAlgoCount,
113
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults,
114
+ void *workSpace,
115
+ size_t workSpaceSizeInBytes);
116
+
117
+ cudnnStatus_t CUDNNWINAPI
118
+ cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle,
119
+ const cudnnTensorDescriptor_t srcDesc,
120
+ const cudnnTensorDescriptor_t diffDesc,
121
+ const cudnnConvolutionDescriptor_t convDesc,
122
+ const cudnnFilterDescriptor_t gradDesc,
123
+ const int requestedAlgoCount,
124
+ int *returnedAlgoCount,
125
+ cudnnConvolutionBwdFilterAlgoPerf_t *perfResults);
126
+
127
+ /*
128
+ * convolution algorithm (which requires potentially some workspace)
129
+ */
130
+
131
+ /* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/
132
+ cudnnStatus_t CUDNNWINAPI
133
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle,
134
+ const cudnnTensorDescriptor_t xDesc,
135
+ const cudnnTensorDescriptor_t dyDesc,
136
+ const cudnnConvolutionDescriptor_t convDesc,
137
+ const cudnnFilterDescriptor_t gradDesc,
138
+ cudnnConvolutionBwdFilterAlgo_t algo,
139
+ size_t *sizeInBytes);
140
+
141
+ cudnnStatus_t CUDNNWINAPI
142
+ cudnnConvolutionBackwardFilter(cudnnHandle_t handle,
143
+ const void *alpha,
144
+ const cudnnTensorDescriptor_t xDesc,
145
+ const void *x,
146
+ const cudnnTensorDescriptor_t dyDesc,
147
+ const void *dy,
148
+ const cudnnConvolutionDescriptor_t convDesc,
149
+ cudnnConvolutionBwdFilterAlgo_t algo,
150
+ void *workSpace,
151
+ size_t workSpaceSizeInBytes,
152
+ const void *beta,
153
+ const cudnnFilterDescriptor_t dwDesc,
154
+ void *dw);
155
+
156
+ /* Function to compute the bias gradient for batch convolution */
157
+ cudnnStatus_t CUDNNWINAPI
158
+ cudnnConvolutionBackwardBias(cudnnHandle_t handle,
159
+ const void *alpha,
160
+ const cudnnTensorDescriptor_t dyDesc,
161
+ const void *dy,
162
+ const void *beta,
163
+ const cudnnTensorDescriptor_t dbDesc,
164
+ void *db);
165
+
166
+ cudnnStatus_t CUDNNWINAPI
167
+ cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops);
168
+
169
+ cudnnStatus_t CUDNNWINAPI
170
+ cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack);
171
+
172
+ cudnnStatus_t CUDNNWINAPI
173
+ cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack,
174
+ cudnnFusedOpsConstParamLabel_t paramLabel,
175
+ const void *param);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack,
179
+ cudnnFusedOpsConstParamLabel_t paramLabel,
180
+ void *param,
181
+ int *isNULL);
182
+
183
+ cudnnStatus_t CUDNNWINAPI
184
+ cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops);
185
+
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack);
188
+
189
+ cudnnStatus_t CUDNNWINAPI
190
+ cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack,
191
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
192
+ void *ptr);
193
+
194
+ cudnnStatus_t CUDNNWINAPI
195
+ cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack,
196
+ cudnnFusedOpsVariantParamLabel_t paramLabel,
197
+ void *ptr);
198
+
199
+ cudnnStatus_t CUDNNWINAPI
200
+ cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops);
201
+
202
+ cudnnStatus_t CUDNNWINAPI
203
+ cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan);
204
+
205
+ cudnnStatus_t CUDNNWINAPI
206
+ cudnnMakeFusedOpsPlan(cudnnHandle_t handle,
207
+ cudnnFusedOpsPlan_t plan,
208
+ const cudnnFusedOpsConstParamPack_t constPack,
209
+ size_t *workspaceSizeInBytes);
210
+
211
+ cudnnStatus_t CUDNNWINAPI
212
+ cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack);
213
+
214
+ cudnnStatus_t CUDNNWINAPI
215
+ cudnnCnnTrainVersionCheck(void);
216
+
217
+ #if defined(__cplusplus)
218
+ }
219
+ #endif
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h ADDED
@@ -0,0 +1,1183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_infer : cuDNN's basic definitions and inference operations.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_INFER_H_)
55
+ #define CUDNN_OPS_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+
62
+ /* These version numbers are autogenerated, do not edit manually. */
63
+ #define CUDNN_OPS_INFER_MAJOR 8
64
+ #define CUDNN_OPS_INFER_MINOR 9
65
+ #define CUDNN_OPS_INFER_PATCH 2
66
+
67
+ #if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \
68
+ (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL)
69
+ #error Version mismatch in cuDNN OPS INFER!!!
70
+ #endif
71
+
72
+ #ifndef CUDNNWINAPI
73
+ #ifdef _WIN32
74
+ #define CUDNNWINAPI __stdcall
75
+ #else
76
+ #define CUDNNWINAPI
77
+ #endif
78
+ #endif
79
+
80
+ /* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */
81
+ #if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__))
82
+ /* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */
83
+ #define CUDNN_DEPRECATED __attribute__((deprecated))
84
+ #elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER)
85
+ /* Microsoft Visual C++ */
86
+ #define CUDNN_DEPRECATED __declspec(deprecated)
87
+ #elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L)
88
+ /* C++14 compilers */
89
+ #define CUDNN_DEPRECATED [[deprecated]]
90
+ #else
91
+ /* No support for the deprecated attribute */
92
+ #define CUDNN_DEPRECATED
93
+ #endif
94
+
95
+ #if defined(__cplusplus)
96
+ extern "C" {
97
+ #endif
98
+
99
+ struct cudnnContext;
100
+ typedef struct cudnnContext *cudnnHandle_t;
101
+
102
+ size_t CUDNNWINAPI
103
+ cudnnGetVersion(void);
104
+
105
+ size_t CUDNNWINAPI
106
+ cudnnGetMaxDeviceVersion(void);
107
+
108
+ /* Returns CUDA Runtime version statically linked against cudnn */
109
+ size_t CUDNNWINAPI
110
+ cudnnGetCudartVersion(void);
111
+
112
+ /*
113
+ * CUDNN return codes
114
+ */
115
+ typedef enum {
116
+ CUDNN_STATUS_SUCCESS = 0,
117
+ CUDNN_STATUS_NOT_INITIALIZED = 1,
118
+ CUDNN_STATUS_ALLOC_FAILED = 2,
119
+ CUDNN_STATUS_BAD_PARAM = 3,
120
+ CUDNN_STATUS_INTERNAL_ERROR = 4,
121
+ CUDNN_STATUS_INVALID_VALUE = 5,
122
+ CUDNN_STATUS_ARCH_MISMATCH = 6,
123
+ CUDNN_STATUS_MAPPING_ERROR = 7,
124
+ CUDNN_STATUS_EXECUTION_FAILED = 8,
125
+ CUDNN_STATUS_NOT_SUPPORTED = 9,
126
+ CUDNN_STATUS_LICENSE_ERROR = 10,
127
+ CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11,
128
+ CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12,
129
+ CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13,
130
+ CUDNN_STATUS_VERSION_MISMATCH = 14,
131
+ } cudnnStatus_t;
132
+
133
+ /* human-readable error messages */
134
+ const char *CUDNNWINAPI
135
+ cudnnGetErrorString(cudnnStatus_t status);
136
+
137
+ /* Forward definition in this version only */
138
+ typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t;
139
+
140
+ typedef enum {
141
+ CUDNN_ERRQUERY_RAWCODE = 0,
142
+ CUDNN_ERRQUERY_NONBLOCKING = 1,
143
+ CUDNN_ERRQUERY_BLOCKING = 2,
144
+ } cudnnErrQueryMode_t;
145
+
146
+ cudnnStatus_t CUDNNWINAPI
147
+ cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag);
148
+
149
+ #ifndef __LIBRARY_TYPES_H__
150
+
151
+ typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType;
152
+
153
+ #endif
154
+
155
+ cudnnStatus_t CUDNNWINAPI
156
+ cudnnGetProperty(libraryPropertyType type, int *value);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnCreate(cudnnHandle_t *handle);
160
+ cudnnStatus_t CUDNNWINAPI
161
+ cudnnDestroy(cudnnHandle_t handle);
162
+ cudnnStatus_t CUDNNWINAPI
163
+ cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId);
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId);
166
+
167
+ /* Data structures to represent Image/Filter and the Neural Network Layer */
168
+ typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t;
169
+ typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t;
170
+ typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t;
171
+ typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t;
172
+ typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t;
173
+ typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t;
174
+ typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t;
175
+ typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t;
176
+ typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t;
177
+ typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t;
178
+ /*
179
+ * CUDNN data type
180
+ */
181
+ typedef enum {
182
+ CUDNN_DATA_FLOAT = 0,
183
+ CUDNN_DATA_DOUBLE = 1,
184
+ CUDNN_DATA_HALF = 2,
185
+ CUDNN_DATA_INT8 = 3,
186
+ CUDNN_DATA_INT32 = 4,
187
+ CUDNN_DATA_INT8x4 = 5,
188
+ CUDNN_DATA_UINT8 = 6,
189
+ CUDNN_DATA_UINT8x4 = 7,
190
+ CUDNN_DATA_INT8x32 = 8,
191
+ CUDNN_DATA_BFLOAT16 = 9,
192
+ CUDNN_DATA_INT64 = 10,
193
+ CUDNN_DATA_BOOLEAN = 11,
194
+ CUDNN_DATA_FP8_E4M3 = 12,
195
+ CUDNN_DATA_FP8_E5M2 = 13,
196
+ CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14,
197
+ } cudnnDataType_t;
198
+
199
+ /*
200
+ * CUDNN math type
201
+ */
202
+ typedef enum {
203
+ CUDNN_DEFAULT_MATH = 0,
204
+ CUDNN_TENSOR_OP_MATH = 1,
205
+ CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2,
206
+ CUDNN_FMA_MATH = 3,
207
+ } cudnnMathType_t;
208
+
209
+ /*
210
+ * CUDNN propagate Nan
211
+ */
212
+ typedef enum {
213
+ CUDNN_NOT_PROPAGATE_NAN = 0,
214
+ CUDNN_PROPAGATE_NAN = 1,
215
+ } cudnnNanPropagation_t;
216
+
217
+ /*
218
+ * CUDNN Determinism
219
+ */
220
+ typedef enum {
221
+ CUDNN_NON_DETERMINISTIC = 0,
222
+ CUDNN_DETERMINISTIC = 1,
223
+ } cudnnDeterminism_t;
224
+
225
+ /* Maximum supported number of tensor dimensions */
226
+ #define CUDNN_DIM_MAX 8
227
+
228
+ /* Create an instance of a generic Tensor descriptor */
229
+ cudnnStatus_t CUDNNWINAPI
230
+ cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc);
231
+
232
+ typedef enum {
233
+ CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */
234
+ CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/
235
+ CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */
236
+ } cudnnTensorFormat_t;
237
+
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc,
240
+ cudnnTensorFormat_t format,
241
+ cudnnDataType_t dataType, /* image data type */
242
+ int n, /* number of inputs (batch size) */
243
+ int c, /* number of input feature maps */
244
+ int h, /* height of input section */
245
+ int w); /* width of input section */
246
+
247
+ cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
249
+ cudnnDataType_t dataType, /* image data type */
250
+ int n, /* number of inputs (batch size) */
251
+ int c, /* number of input feature maps */
252
+ int h, /* height of input section */
253
+ int w, /* width of input section */
254
+ int nStride,
255
+ int cStride,
256
+ int hStride,
257
+ int wStride);
258
+
259
+ cudnnStatus_t CUDNNWINAPI
260
+ cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc,
261
+ cudnnDataType_t *dataType, /* image data type */
262
+ int *n, /* number of inputs (batch size) */
263
+ int *c, /* number of input feature maps */
264
+ int *h, /* height of input section */
265
+ int *w, /* width of input section */
266
+ int *nStride,
267
+ int *cStride,
268
+ int *hStride,
269
+ int *wStride);
270
+
271
+ cudnnStatus_t CUDNNWINAPI
272
+ cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc,
273
+ cudnnDataType_t dataType,
274
+ int nbDims,
275
+ const int dimA[],
276
+ const int strideA[]);
277
+
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
280
+ cudnnTensorFormat_t format,
281
+ cudnnDataType_t dataType,
282
+ int nbDims,
283
+ const int dimA[]);
284
+
285
+ cudnnStatus_t CUDNNWINAPI
286
+ cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc,
287
+ int nbDimsRequested,
288
+ cudnnDataType_t *dataType,
289
+ int *nbDims,
290
+ int dimA[],
291
+ int strideA[]);
292
+
293
+ cudnnStatus_t CUDNNWINAPI
294
+ cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size);
295
+
296
+ /* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride
297
+
298
+ 1)Example of all images in row major order one batch of features after the other (with an optional padding on row)
299
+ input_stride : c x h x h_stride
300
+ feature_stride : h x h_stride
301
+ h_stride : >= w ( h_stride = w if no padding)
302
+ w_stride : 1
303
+
304
+
305
+ 2)Example of all images in row major with features maps interleaved
306
+ input_stride : c x h x h_stride
307
+ feature_stride : 1
308
+ h_stride : w x c
309
+ w_stride : c
310
+
311
+ 3)Example of all images in column major order one batch of features after the other (with optional padding on column)
312
+ input_stride : c x w x w_stride
313
+ feature_stride : w x w_stride
314
+ h_stride : 1
315
+ w_stride : >= h
316
+
317
+ */
318
+
319
+ /* Destroy an instance of Tensor4d descriptor */
320
+ cudnnStatus_t CUDNNWINAPI
321
+ cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc);
322
+
323
+ /* Fold/unfold transforms */
324
+ typedef enum {
325
+ CUDNN_TRANSFORM_FOLD = 0U,
326
+ CUDNN_TRANSFORM_UNFOLD = 1U,
327
+ } cudnnFoldingDirection_t;
328
+
329
+ /** Create a destination descriptor for cudnnTransformTensor */
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc,
332
+ const cudnnTensorDescriptor_t srcDesc,
333
+ cudnnTensorDescriptor_t destDesc,
334
+ size_t *destSizeInBytes);
335
+
336
+ /** Create an empty tensor transform descriptor */
337
+ cudnnStatus_t CUDNNWINAPI
338
+ cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc);
339
+
340
+ /** Initialize a previously created tensor transform descriptor. */
341
+ cudnnStatus_t CUDNNWINAPI
342
+ cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
343
+ const uint32_t nbDims,
344
+ const cudnnTensorFormat_t destFormat,
345
+ const int32_t padBeforeA[],
346
+ const int32_t padAfterA[],
347
+ const uint32_t foldA[],
348
+ const cudnnFoldingDirection_t direction);
349
+
350
+ /**
351
+ * Retrieves the values stored in a previously initialized tensor transform
352
+ * descriptor.
353
+ */
354
+ cudnnStatus_t CUDNNWINAPI
355
+ cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
356
+ uint32_t nbDimsRequested,
357
+ cudnnTensorFormat_t *destFormat,
358
+ int32_t padBeforeA[],
359
+ int32_t padAfterA[],
360
+ uint32_t foldA[],
361
+ cudnnFoldingDirection_t *direction);
362
+
363
+ /**
364
+ * Destroys a previously created tensor transform descriptor.
365
+ */
366
+ cudnnStatus_t CUDNNWINAPI
367
+ cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc);
368
+
369
+ /* Tensor layout conversion helper (y = alpha * x + beta * y) */
370
+ cudnnStatus_t CUDNNWINAPI
371
+ cudnnTransformTensor(cudnnHandle_t handle,
372
+ const void *alpha,
373
+ const cudnnTensorDescriptor_t xDesc,
374
+ const void *x,
375
+ const void *beta,
376
+ const cudnnTensorDescriptor_t yDesc,
377
+ void *y);
378
+
379
+ cudnnStatus_t CUDNNWINAPI
380
+ cudnnTransformTensorEx(cudnnHandle_t handle,
381
+ const cudnnTensorTransformDescriptor_t transDesc,
382
+ const void *alpha,
383
+ const cudnnTensorDescriptor_t srcDesc,
384
+ const void *srcData,
385
+ const void *beta,
386
+ const cudnnTensorDescriptor_t destDesc,
387
+ void *destData);
388
+
389
+ /* Tensor Bias addition : C = alpha * A + beta * C */
390
+ cudnnStatus_t CUDNNWINAPI
391
+ cudnnAddTensor(cudnnHandle_t handle,
392
+ const void *alpha,
393
+ const cudnnTensorDescriptor_t aDesc,
394
+ const void *A,
395
+ const void *beta,
396
+ const cudnnTensorDescriptor_t cDesc,
397
+ void *C);
398
+
399
+ /*
400
+ * CUDNN OpTensor op type
401
+ */
402
+ typedef enum {
403
+ CUDNN_OP_TENSOR_ADD = 0,
404
+ CUDNN_OP_TENSOR_MUL = 1,
405
+ CUDNN_OP_TENSOR_MIN = 2,
406
+ CUDNN_OP_TENSOR_MAX = 3,
407
+ CUDNN_OP_TENSOR_SQRT = 4,
408
+ CUDNN_OP_TENSOR_NOT = 5,
409
+ } cudnnOpTensorOp_t;
410
+
411
+ cudnnStatus_t CUDNNWINAPI
412
+ cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc);
413
+
414
+ cudnnStatus_t CUDNNWINAPI
415
+ cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc,
416
+ cudnnOpTensorOp_t opTensorOp,
417
+ cudnnDataType_t opTensorCompType,
418
+ cudnnNanPropagation_t opTensorNanOpt);
419
+
420
+ cudnnStatus_t CUDNNWINAPI
421
+ cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc,
422
+ cudnnOpTensorOp_t *opTensorOp,
423
+ cudnnDataType_t *opTensorCompType,
424
+ cudnnNanPropagation_t *opTensorNanOpt);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc);
428
+
429
+ /* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */
430
+ /* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */
431
+ cudnnStatus_t CUDNNWINAPI
432
+ cudnnOpTensor(cudnnHandle_t handle,
433
+ const cudnnOpTensorDescriptor_t opTensorDesc,
434
+ const void *alpha1,
435
+ const cudnnTensorDescriptor_t aDesc,
436
+ const void *A,
437
+ const void *alpha2,
438
+ const cudnnTensorDescriptor_t bDesc,
439
+ const void *B,
440
+ const void *beta,
441
+ const cudnnTensorDescriptor_t cDesc,
442
+ void *C);
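
As a usage illustration for the OpTensor declarations above, the following is a minimal sketch of an elementwise add (C = 1*A + 1*B with beta = 0). The handle, the three tensor descriptors, and the device pointers d_A/d_B/d_C are assumed to be created and filled by the caller.

    #include <cudnn.h>

    /* Sketch: elementwise C = A + B for FLOAT tensors, overwriting C. */
    cudnnStatus_t op_add_sketch(cudnnHandle_t handle,
                                cudnnTensorDescriptor_t aDesc, const void *d_A,
                                cudnnTensorDescriptor_t bDesc, const void *d_B,
                                cudnnTensorDescriptor_t cDesc, void *d_C) {
        cudnnOpTensorDescriptor_t opDesc;
        cudnnStatus_t st = cudnnCreateOpTensorDescriptor(&opDesc);
        if (st != CUDNN_STATUS_SUCCESS) return st;

        st = cudnnSetOpTensorDescriptor(opDesc, CUDNN_OP_TENSOR_ADD,
                                        CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN);
        if (st == CUDNN_STATUS_SUCCESS) {
            const float alpha1 = 1.0f, alpha2 = 1.0f, beta = 0.0f;
            st = cudnnOpTensor(handle, opDesc,
                               &alpha1, aDesc, d_A,
                               &alpha2, bDesc, d_B,
                               &beta,   cDesc, d_C);
        }
        cudnnDestroyOpTensorDescriptor(opDesc);
        return st;
    }
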
443
+
444
+ /*
445
+ * CUDNN ReduceTensor op type
446
+ */
447
+ typedef enum {
448
+ CUDNN_REDUCE_TENSOR_ADD = 0,
449
+ CUDNN_REDUCE_TENSOR_MUL = 1,
450
+ CUDNN_REDUCE_TENSOR_MIN = 2,
451
+ CUDNN_REDUCE_TENSOR_MAX = 3,
452
+ CUDNN_REDUCE_TENSOR_AMAX = 4,
453
+ CUDNN_REDUCE_TENSOR_AVG = 5,
454
+ CUDNN_REDUCE_TENSOR_NORM1 = 6,
455
+ CUDNN_REDUCE_TENSOR_NORM2 = 7,
456
+ CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8,
457
+ } cudnnReduceTensorOp_t;
458
+
459
+ /*
460
+ * CUDNN ReduceTensor indices type
461
+ */
462
+ typedef enum {
463
+ CUDNN_REDUCE_TENSOR_NO_INDICES = 0,
464
+ CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1,
465
+ } cudnnReduceTensorIndices_t;
466
+
467
+ /*
468
+ * CUDNN tensor indices type size (all unsigned)
469
+ * Currently not supported, default is 32 bit unsigned.
470
+ */
471
+ typedef enum {
472
+ CUDNN_32BIT_INDICES = 0,
473
+ CUDNN_64BIT_INDICES = 1,
474
+ CUDNN_16BIT_INDICES = 2,
475
+ CUDNN_8BIT_INDICES = 3,
476
+ } cudnnIndicesType_t;
477
+
478
+ cudnnStatus_t CUDNNWINAPI
479
+ cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc);
480
+
481
+ cudnnStatus_t CUDNNWINAPI
482
+ cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc,
483
+ cudnnReduceTensorOp_t reduceTensorOp,
484
+ cudnnDataType_t reduceTensorCompType,
485
+ cudnnNanPropagation_t reduceTensorNanOpt,
486
+ cudnnReduceTensorIndices_t reduceTensorIndices,
487
+ cudnnIndicesType_t reduceTensorIndicesType);
488
+
489
+ cudnnStatus_t CUDNNWINAPI
490
+ cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc,
491
+ cudnnReduceTensorOp_t *reduceTensorOp,
492
+ cudnnDataType_t *reduceTensorCompType,
493
+ cudnnNanPropagation_t *reduceTensorNanOpt,
494
+ cudnnReduceTensorIndices_t *reduceTensorIndices,
495
+ cudnnIndicesType_t *reduceTensorIndicesType);
496
+
497
+ cudnnStatus_t CUDNNWINAPI
498
+ cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc);
499
+
500
+ /* Helper function to return the minimum size of the index space to be passed to the reduction given the input and
501
+ * output tensors */
502
+ cudnnStatus_t CUDNNWINAPI
503
+ cudnnGetReductionIndicesSize(cudnnHandle_t handle,
504
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
505
+ const cudnnTensorDescriptor_t aDesc,
506
+ const cudnnTensorDescriptor_t cDesc,
507
+ size_t *sizeInBytes);
508
+
509
+ /* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output
510
+ * tensors */
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetReductionWorkspaceSize(cudnnHandle_t handle,
513
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
514
+ const cudnnTensorDescriptor_t aDesc,
515
+ const cudnnTensorDescriptor_t cDesc,
516
+ size_t *sizeInBytes);
517
+
518
+ /* Tensor operation : C = reduce op( alpha * A ) + beta * C */
519
+ /* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */
520
+ /* The indices space is ignored for reduce ops other than min or max. */
521
+ cudnnStatus_t CUDNNWINAPI
522
+ cudnnReduceTensor(cudnnHandle_t handle,
523
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
524
+ void *indices,
525
+ size_t indicesSizeInBytes,
526
+ void *workspace,
527
+ size_t workspaceSizeInBytes,
528
+ const void *alpha,
529
+ const cudnnTensorDescriptor_t aDesc,
530
+ const void *A,
531
+ const void *beta,
532
+ const cudnnTensorDescriptor_t cDesc,
533
+ void *C);
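
To tie the reduction declarations above together, here is a minimal sketch of a sum reduction of A into C with no indices. The descriptors and the device buffers d_A/d_C are assumed to be prepared by the caller, and the workspace is sized with the helper declared above (cudaMalloc error checking is omitted for brevity).

    #include <cuda_runtime.h>
    #include <cudnn.h>

    /* Sketch: C = sum over the reduced dimensions of A (FLOAT compute type). */
    cudnnStatus_t reduce_sum_sketch(cudnnHandle_t handle,
                                    cudnnTensorDescriptor_t aDesc, const void *d_A,
                                    cudnnTensorDescriptor_t cDesc, void *d_C) {
        cudnnReduceTensorDescriptor_t rDesc;
        cudnnStatus_t st = cudnnCreateReduceTensorDescriptor(&rDesc);
        if (st != CUDNN_STATUS_SUCCESS) return st;

        st = cudnnSetReduceTensorDescriptor(rDesc, CUDNN_REDUCE_TENSOR_ADD,
                                            CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN,
                                            CUDNN_REDUCE_TENSOR_NO_INDICES,
                                            CUDNN_32BIT_INDICES);
        size_t wsBytes = 0;
        if (st == CUDNN_STATUS_SUCCESS)
            st = cudnnGetReductionWorkspaceSize(handle, rDesc, aDesc, cDesc, &wsBytes);

        void *d_ws = NULL;
        if (st == CUDNN_STATUS_SUCCESS && wsBytes > 0)
            cudaMalloc(&d_ws, wsBytes);

        if (st == CUDNN_STATUS_SUCCESS) {
            const float alpha = 1.0f, beta = 0.0f;
            st = cudnnReduceTensor(handle, rDesc, /*indices=*/NULL, 0,
                                   d_ws, wsBytes,
                                   &alpha, aDesc, d_A, &beta, cDesc, d_C);
        }
        cudaFree(d_ws);
        cudnnDestroyReduceTensorDescriptor(rDesc);
        return st;
    }
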
534
+
535
+ /* Set all values of a tensor to a given value : y[i] = value[0] */
536
+ cudnnStatus_t CUDNNWINAPI
537
+ cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr);
538
+
539
+ /* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */
540
+ cudnnStatus_t CUDNNWINAPI
541
+ cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha);
542
+
543
+ /* Create an instance of FilterStruct */
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc,
549
+ cudnnDataType_t dataType, /* image data type */
550
+ cudnnTensorFormat_t format,
551
+ int k, /* number of output feature maps */
552
+ int c, /* number of input feature maps */
553
+ int h, /* height of each input filter */
554
+ int w); /* width of each input filter */
555
+
556
+ cudnnStatus_t CUDNNWINAPI
557
+ cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc,
558
+ cudnnDataType_t *dataType, /* image data type */
559
+ cudnnTensorFormat_t *format,
560
+ int *k, /* number of output feature maps */
561
+ int *c, /* number of input feature maps */
562
+ int *h, /* height of each input filter */
563
+ int *w); /* width of each input filter */
564
+
565
+ cudnnStatus_t CUDNNWINAPI
566
+ cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc,
567
+ cudnnDataType_t dataType, /* image data type */
568
+ cudnnTensorFormat_t format,
569
+ int nbDims,
570
+ const int filterDimA[]);
571
+
572
+ cudnnStatus_t CUDNNWINAPI
573
+ cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc,
574
+ int nbDimsRequested,
575
+ cudnnDataType_t *dataType, /* image data type */
576
+ cudnnTensorFormat_t *format,
577
+ int *nbDims,
578
+ int filterDimA[]);
579
+ cudnnStatus_t CUDNNWINAPI
580
+ cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size);
581
+
582
+ cudnnStatus_t CUDNNWINAPI
583
+ cudnnTransformFilter(cudnnHandle_t handle,
584
+ const cudnnTensorTransformDescriptor_t transDesc,
585
+ const void *alpha,
586
+ const cudnnFilterDescriptor_t srcDesc,
587
+ const void *srcData,
588
+ const void *beta,
589
+ const cudnnFilterDescriptor_t destDesc,
590
+ void *destData);
591
+
592
+ cudnnStatus_t CUDNNWINAPI
593
+ cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc);
594
+
595
+ /*
596
+ * softmax algorithm
597
+ */
598
+ typedef enum {
599
+ CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */
600
+ CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */
601
+ CUDNN_SOFTMAX_LOG = 2
602
+ } cudnnSoftmaxAlgorithm_t;
603
+
604
+ typedef enum {
605
+ CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */
606
+ CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */
607
+ } cudnnSoftmaxMode_t;
608
+
609
+ /* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */
610
+
611
+ /* Function to perform forward softmax */
612
+ cudnnStatus_t CUDNNWINAPI
613
+ cudnnSoftmaxForward(cudnnHandle_t handle,
614
+ cudnnSoftmaxAlgorithm_t algo,
615
+ cudnnSoftmaxMode_t mode,
616
+ const void *alpha,
617
+ const cudnnTensorDescriptor_t xDesc,
618
+ const void *x,
619
+ const void *beta,
620
+ const cudnnTensorDescriptor_t yDesc,
621
+ void *y);
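
A minimal sketch of calling the softmax declared above, computing a per-channel, numerically stable softmax with alpha = 1 and beta = 0. The descriptors and device pointers are assumed to be set up elsewhere.

    #include <cudnn.h>

    /* Sketch: y = softmax(x) over the C dimension, overwriting y. */
    cudnnStatus_t softmax_sketch(cudnnHandle_t handle,
                                 cudnnTensorDescriptor_t xDesc, const void *d_x,
                                 cudnnTensorDescriptor_t yDesc, void *d_y) {
        const float alpha = 1.0f, beta = 0.0f;
        return cudnnSoftmaxForward(handle,
                                   CUDNN_SOFTMAX_ACCURATE,
                                   CUDNN_SOFTMAX_MODE_CHANNEL,
                                   &alpha, xDesc, d_x,
                                   &beta,  yDesc, d_y);
    }
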
622
+
623
+ /*
624
+ * pooling mode
625
+ */
626
+ typedef enum {
627
+ CUDNN_POOLING_MAX = 0,
628
+ CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */
629
+ CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */
630
+ CUDNN_POOLING_MAX_DETERMINISTIC = 3
631
+ } cudnnPoolingMode_t;
632
+
633
+ /* Create an instance of pooling descriptor */
634
+ cudnnStatus_t CUDNNWINAPI
635
+ cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc);
636
+
637
+ cudnnStatus_t CUDNNWINAPI
638
+ cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc,
639
+ cudnnPoolingMode_t mode,
640
+ cudnnNanPropagation_t maxpoolingNanOpt,
641
+ int windowHeight,
642
+ int windowWidth,
643
+ int verticalPadding,
644
+ int horizontalPadding,
645
+ int verticalStride,
646
+ int horizontalStride);
647
+
648
+ cudnnStatus_t CUDNNWINAPI
649
+ cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
650
+ cudnnPoolingMode_t *mode,
651
+ cudnnNanPropagation_t *maxpoolingNanOpt,
652
+ int *windowHeight,
653
+ int *windowWidth,
654
+ int *verticalPadding,
655
+ int *horizontalPadding,
656
+ int *verticalStride,
657
+ int *horizontalStride);
658
+
659
+ cudnnStatus_t CUDNNWINAPI
660
+ cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc,
661
+ const cudnnPoolingMode_t mode,
662
+ const cudnnNanPropagation_t maxpoolingNanOpt,
663
+ int nbDims,
664
+ const int windowDimA[],
665
+ const int paddingA[],
666
+ const int strideA[]);
667
+
668
+ cudnnStatus_t CUDNNWINAPI
669
+ cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
670
+ int nbDimsRequested,
671
+ cudnnPoolingMode_t *mode,
672
+ cudnnNanPropagation_t *maxpoolingNanOpt,
673
+ int *nbDims,
674
+ int windowDimA[],
675
+ int paddingA[],
676
+ int strideA[]);
677
+
678
+ cudnnStatus_t CUDNNWINAPI
679
+ cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
680
+ const cudnnTensorDescriptor_t inputTensorDesc,
681
+ int nbDims,
682
+ int outputTensorDimA[]);
683
+
684
+ cudnnStatus_t CUDNNWINAPI
685
+ cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
686
+ const cudnnTensorDescriptor_t inputTensorDesc,
687
+ int *n,
688
+ int *c,
689
+ int *h,
690
+ int *w);
691
+
692
+ /* Destroy an instance of pooling descriptor */
693
+ cudnnStatus_t CUDNNWINAPI
694
+ cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc);
695
+
696
+ /* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */
697
+
698
+ /* Function to perform forward pooling */
699
+ cudnnStatus_t CUDNNWINAPI
700
+ cudnnPoolingForward(cudnnHandle_t handle,
701
+ const cudnnPoolingDescriptor_t poolingDesc,
702
+ const void *alpha,
703
+ const cudnnTensorDescriptor_t xDesc,
704
+ const void *x,
705
+ const void *beta,
706
+ const cudnnTensorDescriptor_t yDesc,
707
+ void *y);
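
A minimal sketch of the pooling setup above: build a 2x2, stride-2 max-pooling descriptor and query the output shape so the caller can size the y tensor before calling cudnnPoolingForward. The input descriptor is assumed to exist already.

    #include <cudnn.h>

    /* Sketch: 2x2 max pooling, stride 2, no padding; returns output NCHW dims. */
    cudnnStatus_t maxpool_setup_sketch(cudnnPoolingDescriptor_t *poolDesc,
                                       cudnnTensorDescriptor_t xDesc,
                                       int *n, int *c, int *h, int *w) {
        cudnnStatus_t st = cudnnCreatePoolingDescriptor(poolDesc);
        if (st != CUDNN_STATUS_SUCCESS) return st;
        st = cudnnSetPooling2dDescriptor(*poolDesc, CUDNN_POOLING_MAX,
                                         CUDNN_NOT_PROPAGATE_NAN,
                                         /*windowH=*/2, /*windowW=*/2,
                                         /*padH=*/0, /*padW=*/0,
                                         /*strideH=*/2, /*strideW=*/2);
        if (st == CUDNN_STATUS_SUCCESS)
            st = cudnnGetPooling2dForwardOutputDim(*poolDesc, xDesc, n, c, h, w);
        return st;
    }

The forward call itself then follows the usual alpha/beta pattern, e.g. alpha = 1 and beta = 0 to overwrite y.
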
708
+
709
+ /*
710
+ * activation mode
711
+ */
712
+ typedef enum {
713
+ CUDNN_ACTIVATION_SIGMOID = 0,
714
+ CUDNN_ACTIVATION_RELU = 1,
715
+ CUDNN_ACTIVATION_TANH = 2,
716
+ CUDNN_ACTIVATION_CLIPPED_RELU = 3,
717
+ CUDNN_ACTIVATION_ELU = 4,
718
+ CUDNN_ACTIVATION_IDENTITY = 5,
719
+ CUDNN_ACTIVATION_SWISH = 6
720
+ } cudnnActivationMode_t;
721
+
722
+ /* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */
723
+ cudnnStatus_t CUDNNWINAPI
724
+ cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc);
725
+
726
+ cudnnStatus_t CUDNNWINAPI
727
+ cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc,
728
+ cudnnActivationMode_t mode,
729
+ cudnnNanPropagation_t reluNanOpt,
730
+ double coef); /* ceiling for clipped RELU, alpha for ELU */
731
+
732
+ cudnnStatus_t CUDNNWINAPI
733
+ cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc,
734
+ cudnnActivationMode_t *mode,
735
+ cudnnNanPropagation_t *reluNanOpt,
736
+ double *coef); /* ceiling for clipped RELU, alpha for ELU */
737
+
738
+ cudnnStatus_t CUDNNWINAPI
739
+ cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double swish_beta);
740
+
741
+ cudnnStatus_t CUDNNWINAPI
742
+ cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta);
743
+
744
+ cudnnStatus_t CUDNNWINAPI
745
+ cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc);
746
+
747
+ /* Function to perform forward activation */
748
+ cudnnStatus_t CUDNNWINAPI
749
+ cudnnActivationForward(cudnnHandle_t handle,
750
+ cudnnActivationDescriptor_t activationDesc,
751
+ const void *alpha,
752
+ const cudnnTensorDescriptor_t xDesc,
753
+ const void *x,
754
+ const void *beta,
755
+ const cudnnTensorDescriptor_t yDesc,
756
+ void *y);
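
As an illustration of the coef parameter documented above, a minimal sketch of a ReLU clipped at 6 (coef is the ceiling for CUDNN_ACTIVATION_CLIPPED_RELU). The descriptors and device buffers are assumed to be provided by the caller.

    #include <cudnn.h>

    /* Sketch: y = min(max(x, 0), 6) via clipped ReLU, overwriting y. */
    cudnnStatus_t relu6_sketch(cudnnHandle_t handle,
                               cudnnTensorDescriptor_t xDesc, const void *d_x,
                               cudnnTensorDescriptor_t yDesc, void *d_y) {
        cudnnActivationDescriptor_t actDesc;
        cudnnStatus_t st = cudnnCreateActivationDescriptor(&actDesc);
        if (st != CUDNN_STATUS_SUCCESS) return st;
        st = cudnnSetActivationDescriptor(actDesc, CUDNN_ACTIVATION_CLIPPED_RELU,
                                          CUDNN_NOT_PROPAGATE_NAN, /*coef=*/6.0);
        if (st == CUDNN_STATUS_SUCCESS) {
            const float alpha = 1.0f, beta = 0.0f;
            st = cudnnActivationForward(handle, actDesc, &alpha, xDesc, d_x,
                                        &beta, yDesc, d_y);
        }
        cudnnDestroyActivationDescriptor(actDesc);
        return st;
    }
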
757
+
758
+ /*
759
+ * Create an instance of LRN (Local Response Normalization) descriptor
760
+ * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper
761
+ */
762
+ cudnnStatus_t CUDNNWINAPI
763
+ cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc);
764
+
765
+ #define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */
766
+ #define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */
767
+ #define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */
768
+ #define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */
769
+
770
+ /* LRN layer mode */
771
+ typedef enum {
772
+ CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */
773
+ } cudnnLRNMode_t;
774
+
775
+ /*
776
+ * Uses a window [center-lookBehind, center+lookAhead], where
777
+ * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1.
778
+ * Values of double parameters cast to tensor data type.
779
+ */
780
+ cudnnStatus_t CUDNNWINAPI
781
+ cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK);
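
Working the window formula above through the default lrnN = 5: lookBehind = floor((5-1)/2) = 2 and lookAhead = 5 - 2 - 1 = 2, so each channel c is normalized over the window of channels [c-2, c+2].
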
782
+ /*
783
+ * Retrieve the settings currently stored in an LRN layer descriptor
784
+ * Any of the provided pointers can be NULL (no corresponding value will be returned)
785
+ */
786
+ cudnnStatus_t CUDNNWINAPI
787
+ cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK);
788
+
789
+ /* Destroy an instance of LRN descriptor */
790
+ cudnnStatus_t CUDNNWINAPI
791
+ cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc);
792
+
793
+ /* LRN functions: output = alpha * normalize(x) + beta * old_y */
794
+
795
+ /* LRN cross-channel forward computation. Double parameters cast to tensor data type */
796
+ cudnnStatus_t CUDNNWINAPI
797
+ cudnnLRNCrossChannelForward(cudnnHandle_t handle,
798
+ cudnnLRNDescriptor_t normDesc,
799
+ cudnnLRNMode_t lrnMode,
800
+ const void *alpha,
801
+ const cudnnTensorDescriptor_t xDesc,
802
+ const void *x,
803
+ const void *beta,
804
+ const cudnnTensorDescriptor_t yDesc,
805
+ void *y);
806
+
807
+ typedef enum {
808
+ CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0,
809
+ } cudnnDivNormMode_t;
810
+
811
+ /* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */
812
+ cudnnStatus_t CUDNNWINAPI
813
+ cudnnDivisiveNormalizationForward(cudnnHandle_t handle,
814
+ cudnnLRNDescriptor_t normDesc,
815
+ cudnnDivNormMode_t mode,
816
+ const void *alpha,
817
+ const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */
818
+ const void *x,
819
+ const void *means, /* if NULL, means are assumed to be zero */
820
+ void *temp,
821
+ void *temp2,
822
+ const void *beta,
823
+ const cudnnTensorDescriptor_t yDesc,
824
+ void *y);
825
+
826
+ typedef enum {
827
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
828
+ CUDNN_BATCHNORM_PER_ACTIVATION = 0,
829
+
830
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
831
+ CUDNN_BATCHNORM_SPATIAL = 1,
832
+
833
+ /*
834
+ * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors).
835
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values
836
+ */
837
+ CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2,
838
+ } cudnnBatchNormMode_t;
839
+
840
+ #define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */
841
+
842
+ /*
843
+ * Derives a tensor descriptor from layer data descriptor for BatchNormalization
844
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
845
+ * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions.
846
+ */
847
+ cudnnStatus_t CUDNNWINAPI
848
+ cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc,
849
+ const cudnnTensorDescriptor_t xDesc,
850
+ cudnnBatchNormMode_t mode);
851
+
852
+ typedef enum {
853
+ CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */
854
+ CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */
855
+ CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */
856
+ } cudnnBatchNormOps_t;
857
+
858
+ /*
859
+ * Performs Batch Normalization during Inference:
860
+ * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k]
861
+ * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed
862
+ * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining
863
+ * above for notes on function arguments.
864
+ */
865
+ cudnnStatus_t CUDNNWINAPI
866
+ cudnnBatchNormalizationForwardInference(cudnnHandle_t handle,
867
+ cudnnBatchNormMode_t mode,
868
+ const void *alpha, /* alpha[0] = result blend factor */
869
+ const void *beta, /* beta[0] = dest layer blend factor */
870
+ const cudnnTensorDescriptor_t xDesc,
871
+ const void *x, /* NxCxHxW */
872
+ const cudnnTensorDescriptor_t yDesc,
873
+ void *y, /* NxCxHxW */
874
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
875
+ const void *bnScale,
876
+ const void *bnBias,
877
+ const void *estimatedMean,
878
+ const void *estimatedVariance,
879
+ double epsilon);
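
A minimal sketch of the inference path declared above, in spatial mode, deriving the scale/bias descriptor with cudnnDeriveBNTensorDescriptor. The x/y descriptors and all device buffers (input, output, scale, bias, running mean and variance) are assumed to be prepared by the caller; epsilon is an arbitrary illustrative value.

    #include <cudnn.h>

    /* Sketch: y = batch-norm inference on x with per-channel (spatial) statistics. */
    cudnnStatus_t bn_inference_sketch(cudnnHandle_t handle,
                                      cudnnTensorDescriptor_t xDesc, const void *d_x,
                                      cudnnTensorDescriptor_t yDesc, void *d_y,
                                      const void *d_scale, const void *d_bias,
                                      const void *d_mean, const void *d_var) {
        cudnnTensorDescriptor_t bnDesc;
        cudnnStatus_t st = cudnnCreateTensorDescriptor(&bnDesc);
        if (st != CUDNN_STATUS_SUCCESS) return st;
        st = cudnnDeriveBNTensorDescriptor(bnDesc, xDesc, CUDNN_BATCHNORM_SPATIAL);
        if (st == CUDNN_STATUS_SUCCESS) {
            const float alpha = 1.0f, beta = 0.0f;
            st = cudnnBatchNormalizationForwardInference(
                handle, CUDNN_BATCHNORM_SPATIAL, &alpha, &beta,
                xDesc, d_x, yDesc, d_y,
                bnDesc, d_scale, d_bias, d_mean, d_var,
                /*epsilon=*/1e-5);
        }
        cudnnDestroyTensorDescriptor(bnDesc);
        return st;
    }
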
880
+
881
+ typedef enum {
882
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
883
+ CUDNN_NORM_PER_ACTIVATION = 0,
884
+
885
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
886
+ CUDNN_NORM_PER_CHANNEL = 1,
887
+ } cudnnNormMode_t;
888
+
889
+ typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t;
890
+
891
+ /*
892
+ * Derives a tensor descriptor from layer data descriptor for Normalization
893
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
894
+ * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions.
895
+ */
896
+ cudnnStatus_t CUDNNWINAPI
897
+ cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc,
898
+ cudnnTensorDescriptor_t derivedNormMeanVarDesc,
899
+ const cudnnTensorDescriptor_t xDesc,
900
+ cudnnNormMode_t mode,
901
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
902
+
903
+ typedef enum {
904
+ CUDNN_NORM_OPS_NORM = 0, /* do normalization only */
905
+ CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */
906
+ CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */
907
+ } cudnnNormOps_t;
908
+
909
+ /*
910
+ * Performs Normalization during Inference:
911
+ * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k]
912
+ * with normScale, normBias, runningMean, runningInvVariance tensors indexed
913
+ * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining
914
+ * above for notes on function arguments.
915
+ */
916
+ cudnnStatus_t CUDNNWINAPI
917
+ cudnnNormalizationForwardInference(cudnnHandle_t handle,
918
+ cudnnNormMode_t mode,
919
+ cudnnNormOps_t normOps,
920
+ cudnnNormAlgo_t algo,
921
+ const void *alpha, /* alpha[0] = result blend factor */
922
+ const void *beta, /* beta[0] = dest layer blend factor */
923
+ const cudnnTensorDescriptor_t xDesc,
924
+ const void *x, /* NxCxHxW */
925
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
926
+ const void *normScale,
927
+ const void *normBias,
928
+ const cudnnTensorDescriptor_t normMeanVarDesc,
929
+ const void *estimatedMean,
930
+ const void *estimatedVariance,
931
+ const cudnnTensorDescriptor_t zDesc,
932
+ const void *z,
933
+ cudnnActivationDescriptor_t activationDesc,
934
+ const cudnnTensorDescriptor_t yDesc,
935
+ void *y, /* NxCxHxW */
936
+ double epsilon,
937
+ int groupCnt); /* Placeholder for future work */
938
+
939
+ /* APIs for spatial transformer network*/
940
+ typedef enum {
941
+ CUDNN_SAMPLER_BILINEAR = 0,
942
+ } cudnnSamplerType_t;
943
+
944
+ cudnnStatus_t CUDNNWINAPI
945
+ cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc);
946
+
947
+ cudnnStatus_t CUDNNWINAPI
948
+ cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc,
949
+ cudnnSamplerType_t samplerType,
950
+ cudnnDataType_t dataType,
951
+ const int nbDims,
952
+ const int dimA[]);
953
+
954
+ cudnnStatus_t CUDNNWINAPI
955
+ cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc);
956
+
957
+ cudnnStatus_t CUDNNWINAPI
958
+ cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle,
959
+ const cudnnSpatialTransformerDescriptor_t stDesc,
960
+ const void *theta,
961
+ void *grid);
962
+
963
+ cudnnStatus_t CUDNNWINAPI
964
+ cudnnSpatialTfSamplerForward(cudnnHandle_t handle,
965
+ cudnnSpatialTransformerDescriptor_t stDesc,
966
+ const void *alpha,
967
+ const cudnnTensorDescriptor_t xDesc,
968
+ const void *x,
969
+ const void *grid,
970
+ const void *beta,
971
+ cudnnTensorDescriptor_t yDesc,
972
+ void *y);
973
+
974
+ typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t;
975
+
976
+ cudnnStatus_t CUDNNWINAPI
977
+ cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc);
978
+
979
+ cudnnStatus_t CUDNNWINAPI
980
+ cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc);
981
+
982
+ /*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */
983
+ cudnnStatus_t CUDNNWINAPI
984
+ cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes);
985
+
986
+ /*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */
987
+ cudnnStatus_t CUDNNWINAPI
988
+ cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes);
989
+
990
+ cudnnStatus_t CUDNNWINAPI
991
+ cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
992
+ cudnnHandle_t handle,
993
+ float dropout,
994
+ void *states,
995
+ size_t stateSizeInBytes,
996
+ unsigned long long seed);
997
+
998
+ /* Restores the dropout descriptor to a previously saved-off state */
999
+ cudnnStatus_t CUDNNWINAPI
1000
+ cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1001
+ cudnnHandle_t handle,
1002
+ float dropout,
1003
+ void *states,
1004
+ size_t stateSizeInBytes,
1005
+ unsigned long long seed);
1006
+
1007
+ cudnnStatus_t CUDNNWINAPI
1008
+ cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1009
+ cudnnHandle_t handle,
1010
+ float *dropout,
1011
+ void **states,
1012
+ unsigned long long *seed);
1013
+
1014
+ cudnnStatus_t CUDNNWINAPI
1015
+ cudnnDropoutForward(cudnnHandle_t handle,
1016
+ const cudnnDropoutDescriptor_t dropoutDesc,
1017
+ const cudnnTensorDescriptor_t xdesc,
1018
+ const void *x,
1019
+ const cudnnTensorDescriptor_t ydesc,
1020
+ void *y,
1021
+ void *reserveSpace,
1022
+ size_t reserveSpaceSizeInBytes);
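
Putting the dropout declarations above together, a minimal sketch that queries the two buffer sizes, initializes a descriptor, and runs the forward pass. The x/y descriptors and data pointers are assumed valid; the states and reserve-space buffers are allocated here purely for illustration (cudaMalloc error checking is omitted).

    #include <cuda_runtime.h>
    #include <cudnn.h>

    /* Sketch: y = dropout(x) with dropout probability 0.5. */
    cudnnStatus_t dropout_sketch(cudnnHandle_t handle,
                                 cudnnTensorDescriptor_t xDesc, const void *d_x,
                                 cudnnTensorDescriptor_t yDesc, void *d_y) {
        size_t stateBytes = 0, reserveBytes = 0;
        cudnnStatus_t st = cudnnDropoutGetStatesSize(handle, &stateBytes);
        if (st == CUDNN_STATUS_SUCCESS)
            st = cudnnDropoutGetReserveSpaceSize(xDesc, &reserveBytes);
        if (st != CUDNN_STATUS_SUCCESS) return st;

        void *d_states = NULL, *d_reserve = NULL;
        cudaMalloc(&d_states, stateBytes);
        cudaMalloc(&d_reserve, reserveBytes);

        cudnnDropoutDescriptor_t dropDesc = NULL;
        st = cudnnCreateDropoutDescriptor(&dropDesc);
        if (st == CUDNN_STATUS_SUCCESS)
            st = cudnnSetDropoutDescriptor(dropDesc, handle, /*dropout=*/0.5f,
                                           d_states, stateBytes, /*seed=*/1234ULL);
        if (st == CUDNN_STATUS_SUCCESS)
            st = cudnnDropoutForward(handle, dropDesc, xDesc, d_x, yDesc, d_y,
                                     d_reserve, reserveBytes);

        if (dropDesc) cudnnDestroyDropoutDescriptor(dropDesc);
        cudaFree(d_reserve);
        cudaFree(d_states);
        return st;
    }
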
1023
+
1024
+ /* TODO: remove */
1025
+
1026
+ typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t;
1027
+ typedef struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t;
1028
+
1029
+ /* TODO: move these enums out to the appropriate submodule */
1030
+ typedef enum {
1031
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0,
1032
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1,
1033
+ CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2,
1034
+ CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3,
1035
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4,
1036
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5,
1037
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6,
1038
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7,
1039
+ CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8
1040
+ } cudnnConvolutionFwdAlgo_t;
1041
+
1042
+ typedef enum {
1043
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */
1044
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1,
1045
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2,
1046
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */
1047
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */
1048
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5,
1049
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6,
1050
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7
1051
+ } cudnnConvolutionBwdFilterAlgo_t;
1052
+
1053
+ typedef enum {
1054
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */
1055
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1,
1056
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2,
1057
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3,
1058
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4,
1059
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5,
1060
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6
1061
+ } cudnnConvolutionBwdDataAlgo_t;
1062
+
1063
+ typedef enum {
1064
+ CUDNN_RNN_ALGO_STANDARD = 0,
1065
+ CUDNN_RNN_ALGO_PERSIST_STATIC = 1,
1066
+ CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2,
1067
+ CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3,
1068
+ CUDNN_RNN_ALGO_COUNT = 4,
1069
+ } cudnnRNNAlgo_t;
1070
+
1071
+ typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t;
1072
+
1073
+ /* TODO: remove */
1074
+ typedef struct cudnnAlgorithmUnionStruct {
1075
+ union Algorithm {
1076
+ cudnnConvolutionFwdAlgo_t convFwdAlgo;
1077
+ cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo;
1078
+ cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo;
1079
+ cudnnRNNAlgo_t RNNAlgo;
1080
+ cudnnCTCLossAlgo_t CTCLossAlgo;
1081
+ } algo;
1082
+ } cudnnAlgorithm_t;
1083
+
1084
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1085
+ cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc);
1086
+
1087
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1088
+ cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm);
1089
+
1090
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1091
+ cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm);
1092
+
1093
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1094
+ cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest);
1095
+
1096
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1097
+ cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc);
1098
+
1099
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1100
+ cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate);
1101
+
1102
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1103
+ cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf,
1104
+ cudnnAlgorithmDescriptor_t algoDesc,
1105
+ cudnnStatus_t status,
1106
+ float time,
1107
+ size_t memory);
1108
+
1109
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1110
+ cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf,
1111
+ cudnnAlgorithmDescriptor_t *algoDesc,
1112
+ cudnnStatus_t *status,
1113
+ float *time,
1114
+ size_t *memory);
1115
+
1116
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1117
+ cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy);
1118
+
1119
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1120
+ cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes);
1121
+
1122
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1123
+ cudnnSaveAlgorithm(cudnnHandle_t handle,
1124
+ cudnnAlgorithmDescriptor_t algoDesc,
1125
+ void *algoSpace,
1126
+ size_t algoSpaceSizeInBytes);
1127
+
1128
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1129
+ cudnnRestoreAlgorithm(cudnnHandle_t handle,
1130
+ void *algoSpace,
1131
+ size_t algoSpaceSizeInBytes,
1132
+ cudnnAlgorithmDescriptor_t algoDesc);
1133
+
1134
+ typedef enum {
1135
+ CUDNN_SEV_FATAL = 0,
1136
+ CUDNN_SEV_ERROR = 1,
1137
+ CUDNN_SEV_WARNING = 2,
1138
+ CUDNN_SEV_INFO = 3,
1139
+ } cudnnSeverity_t;
1140
+
1141
+ /* Message masks to be used with cudnnSetCallback() */
1142
+ #define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR)
1143
+ #define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING)
1144
+ #define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO)
1145
+
1146
+ /* struct containing useful information for each API call */
1147
+ typedef struct cudnnDebugStruct {
1148
+ unsigned cudnn_version;
1149
+ cudnnStatus_t cudnnStatus;
1150
+ unsigned time_sec; /* epoch time in seconds */
1151
+ unsigned time_usec; /* microseconds part of epoch time */
1152
+ unsigned time_delta; /* time since start in seconds */
1153
+ cudnnHandle_t handle; /* cudnn handle */
1154
+ cudaStream_t stream; /* cuda stream ID */
1155
+ unsigned long long pid; /* process ID */
1156
+ unsigned long long tid; /* thread ID */
1157
+ int cudaDeviceId; /* CUDA device ID */
1158
+ int reserved[15]; /* reserved for future use */
1159
+ } cudnnDebug_t;
1160
+
1161
+ typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg);
1162
+
1163
+ cudnnStatus_t CUDNNWINAPI
1164
+ cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr);
1165
+
1166
+ cudnnStatus_t CUDNNWINAPI
1167
+ cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr);
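
A minimal sketch of wiring the callback API above to stderr, enabling the error and warning masks. The logger name and the message format are illustrative only.

    #include <stdio.h>
    #include <cudnn.h>

    /* Sketch: log cuDNN errors and warnings through the callback mechanism. */
    static void my_cudnn_logger(cudnnSeverity_t sev, void *udata,
                                const cudnnDebug_t *dbg, const char *msg) {
        (void)udata;
        fprintf(stderr, "[cudnn sev=%d ver=%u] %s\n",
                (int)sev, dbg ? dbg->cudnn_version : 0u, msg);
    }

    static cudnnStatus_t install_logger(void) {
        return cudnnSetCallback(CUDNN_SEV_ERROR_EN | CUDNN_SEV_WARNING_EN,
                                /*udata=*/NULL, my_cudnn_logger);
    }
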
1168
+
1169
+ /*
1170
+ * \brief Cross-library version checker.
1171
+ * This function is implemented differently in each sub-library. Each sublib
1172
+ * checks whether its own version matches that of its dependencies.
1173
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
1174
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
1175
+ */
1176
+ cudnnStatus_t CUDNNWINAPI
1177
+ cudnnOpsInferVersionCheck(void);
1178
+
1179
+ #if defined(__cplusplus)
1180
+ }
1181
+ #endif
1182
+
1183
+ #endif /* CUDNN_OPS_INFER_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h ADDED
@@ -0,0 +1,1183 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_infer : cuDNN's basic definitions and inference operations.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_INFER_H_)
55
+ #define CUDNN_OPS_INFER_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+
62
+ /* These version numbers are autogenerated, do not edit manually. */
63
+ #define CUDNN_OPS_INFER_MAJOR 8
64
+ #define CUDNN_OPS_INFER_MINOR 9
65
+ #define CUDNN_OPS_INFER_PATCH 2
66
+
67
+ #if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \
68
+ (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL)
69
+ #error Version mismatch in cuDNN OPS INFER!!!
70
+ #endif
71
+
72
+ #ifndef CUDNNWINAPI
73
+ #ifdef _WIN32
74
+ #define CUDNNWINAPI __stdcall
75
+ #else
76
+ #define CUDNNWINAPI
77
+ #endif
78
+ #endif
79
+
80
+ /* Warnings for deprecated APIs are enabled using the CUDNN_WARN_DEPRECATED macro */
81
+ #if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__))
82
+ /* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */
83
+ #define CUDNN_DEPRECATED __attribute__((deprecated))
84
+ #elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER)
85
+ /* Microsoft Visual C++ */
86
+ #define CUDNN_DEPRECATED __declspec(deprecated)
87
+ #elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L)
88
+ /* C++14 compilers */
89
+ #define CUDNN_DEPRECATED [[deprecated]]
90
+ #else
91
+ /* No support for the deprecated attribute */
92
+ #define CUDNN_DEPRECATED
93
+ #endif
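
In practice the warnings are opted into per translation unit, for example by defining the macro before any cuDNN header is included (or, equivalently, by passing it on the compiler command line); the snippet below is only a sketch of that pattern.

    /* Opt in to deprecation warnings before pulling in the cuDNN headers. */
    #define CUDNN_WARN_DEPRECATED 1
    #include <cudnn.h>
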
94
+
95
+ #if defined(__cplusplus)
96
+ extern "C" {
97
+ #endif
98
+
99
+ struct cudnnContext;
100
+ typedef struct cudnnContext *cudnnHandle_t;
101
+
102
+ size_t CUDNNWINAPI
103
+ cudnnGetVersion(void);
104
+
105
+ size_t CUDNNWINAPI
106
+ cudnnGetMaxDeviceVersion(void);
107
+
108
+ /* Returns CUDA Runtime version statically linked against cudnn */
109
+ size_t CUDNNWINAPI
110
+ cudnnGetCudartVersion(void);
111
+
112
+ /*
113
+ * CUDNN return codes
114
+ */
115
+ typedef enum {
116
+ CUDNN_STATUS_SUCCESS = 0,
117
+ CUDNN_STATUS_NOT_INITIALIZED = 1,
118
+ CUDNN_STATUS_ALLOC_FAILED = 2,
119
+ CUDNN_STATUS_BAD_PARAM = 3,
120
+ CUDNN_STATUS_INTERNAL_ERROR = 4,
121
+ CUDNN_STATUS_INVALID_VALUE = 5,
122
+ CUDNN_STATUS_ARCH_MISMATCH = 6,
123
+ CUDNN_STATUS_MAPPING_ERROR = 7,
124
+ CUDNN_STATUS_EXECUTION_FAILED = 8,
125
+ CUDNN_STATUS_NOT_SUPPORTED = 9,
126
+ CUDNN_STATUS_LICENSE_ERROR = 10,
127
+ CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11,
128
+ CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12,
129
+ CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13,
130
+ CUDNN_STATUS_VERSION_MISMATCH = 14,
131
+ } cudnnStatus_t;
132
+
133
+ /* human-readable error messages */
134
+ const char *CUDNNWINAPI
135
+ cudnnGetErrorString(cudnnStatus_t status);
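
Since every entry point below returns a cudnnStatus_t, a small helper built on cudnnGetErrorString() is a common pattern; the macro below is a hypothetical sketch, not part of the header.

    #include <stdio.h>
    #include <stdlib.h>
    #include <cudnn.h>

    /* Sketch: abort with a readable message on any non-success status. */
    #define CHECK_CUDNN(call)                                              \
        do {                                                               \
            cudnnStatus_t s_ = (call);                                     \
            if (s_ != CUDNN_STATUS_SUCCESS) {                              \
                fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,         \
                        cudnnGetErrorString(s_));                          \
                exit(EXIT_FAILURE);                                        \
            }                                                              \
        } while (0)
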
136
+
137
+ /* Forward definition in this version only */
138
+ typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t;
139
+
140
+ typedef enum {
141
+ CUDNN_ERRQUERY_RAWCODE = 0,
142
+ CUDNN_ERRQUERY_NONBLOCKING = 1,
143
+ CUDNN_ERRQUERY_BLOCKING = 2,
144
+ } cudnnErrQueryMode_t;
145
+
146
+ cudnnStatus_t CUDNNWINAPI
147
+ cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag);
148
+
149
+ #ifndef __LIBRARY_TYPES_H__
150
+
151
+ typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType;
152
+
153
+ #endif
154
+
155
+ cudnnStatus_t CUDNNWINAPI
156
+ cudnnGetProperty(libraryPropertyType type, int *value);
157
+
158
+ cudnnStatus_t CUDNNWINAPI
159
+ cudnnCreate(cudnnHandle_t *handle);
160
+ cudnnStatus_t CUDNNWINAPI
161
+ cudnnDestroy(cudnnHandle_t handle);
162
+ cudnnStatus_t CUDNNWINAPI
163
+ cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId);
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId);
166
+
167
+ /* Data structures to represent Image/Filter and the Neural Network Layer */
168
+ typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t;
169
+ typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t;
170
+ typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t;
171
+ typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t;
172
+ typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t;
173
+ typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t;
174
+ typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t;
175
+ typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t;
176
+ typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t;
177
+ typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t;
178
+ /*
179
+ * CUDNN data type
180
+ */
181
+ typedef enum {
182
+ CUDNN_DATA_FLOAT = 0,
183
+ CUDNN_DATA_DOUBLE = 1,
184
+ CUDNN_DATA_HALF = 2,
185
+ CUDNN_DATA_INT8 = 3,
186
+ CUDNN_DATA_INT32 = 4,
187
+ CUDNN_DATA_INT8x4 = 5,
188
+ CUDNN_DATA_UINT8 = 6,
189
+ CUDNN_DATA_UINT8x4 = 7,
190
+ CUDNN_DATA_INT8x32 = 8,
191
+ CUDNN_DATA_BFLOAT16 = 9,
192
+ CUDNN_DATA_INT64 = 10,
193
+ CUDNN_DATA_BOOLEAN = 11,
194
+ CUDNN_DATA_FP8_E4M3 = 12,
195
+ CUDNN_DATA_FP8_E5M2 = 13,
196
+ CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14,
197
+ } cudnnDataType_t;
198
+
199
+ /*
200
+ * CUDNN math type
201
+ */
202
+ typedef enum {
203
+ CUDNN_DEFAULT_MATH = 0,
204
+ CUDNN_TENSOR_OP_MATH = 1,
205
+ CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2,
206
+ CUDNN_FMA_MATH = 3,
207
+ } cudnnMathType_t;
208
+
209
+ /*
210
+ * CUDNN propagate Nan
211
+ */
212
+ typedef enum {
213
+ CUDNN_NOT_PROPAGATE_NAN = 0,
214
+ CUDNN_PROPAGATE_NAN = 1,
215
+ } cudnnNanPropagation_t;
216
+
217
+ /*
218
+ * CUDNN Determinism
219
+ */
220
+ typedef enum {
221
+ CUDNN_NON_DETERMINISTIC = 0,
222
+ CUDNN_DETERMINISTIC = 1,
223
+ } cudnnDeterminism_t;
224
+
225
+ /* Maximum supported number of tensor dimensions */
226
+ #define CUDNN_DIM_MAX 8
227
+
228
+ /* Create an instance of a generic Tensor descriptor */
229
+ cudnnStatus_t CUDNNWINAPI
230
+ cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc);
231
+
232
+ typedef enum {
233
+ CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */
234
+ CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/
235
+ CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is a vector of C elements; vector length is given by the data type */
236
+ } cudnnTensorFormat_t;
237
+
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc,
240
+ cudnnTensorFormat_t format,
241
+ cudnnDataType_t dataType, /* image data type */
242
+ int n, /* number of inputs (batch size) */
243
+ int c, /* number of input feature maps */
244
+ int h, /* height of input section */
245
+ int w); /* width of input section */
246
+
247
+ cudnnStatus_t CUDNNWINAPI
248
+ cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
249
+ cudnnDataType_t dataType, /* image data type */
250
+ int n, /* number of inputs (batch size) */
251
+ int c, /* number of input feature maps */
252
+ int h, /* height of input section */
253
+ int w, /* width of input section */
254
+ int nStride,
255
+ int cStride,
256
+ int hStride,
257
+ int wStride);
258
+
259
+ cudnnStatus_t CUDNNWINAPI
260
+ cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc,
261
+ cudnnDataType_t *dataType, /* image data type */
262
+ int *n, /* number of inputs (batch size) */
263
+ int *c, /* number of input feature maps */
264
+ int *h, /* height of input section */
265
+ int *w, /* width of input section */
266
+ int *nStride,
267
+ int *cStride,
268
+ int *hStride,
269
+ int *wStride);
270
+
271
+ cudnnStatus_t CUDNNWINAPI
272
+ cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc,
273
+ cudnnDataType_t dataType,
274
+ int nbDims,
275
+ const int dimA[],
276
+ const int strideA[]);
277
+
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc,
280
+ cudnnTensorFormat_t format,
281
+ cudnnDataType_t dataType,
282
+ int nbDims,
283
+ const int dimA[]);
284
+
285
+ cudnnStatus_t CUDNNWINAPI
286
+ cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc,
287
+ int nbDimsRequested,
288
+ cudnnDataType_t *dataType,
289
+ int *nbDims,
290
+ int dimA[],
291
+ int strideA[]);
292
+
293
+ cudnnStatus_t CUDNNWINAPI
294
+ cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size);
295
+
296
+ /* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride
297
+
298
+ 1)Example of all images in row major order one batch of features after the other (with an optional padding on row)
299
+ input_stride : c x h x h_stride
300
+ feature_stride : h x h_stride
301
+ h_stride : >= w ( h_stride = w if no padding)
302
+ w_stride : 1
303
+
304
+
305
+ 2) Example of all images in row major order with feature maps interleaved
306
+ input_stride : c x h x h_stride
307
+ feature_stride : 1
308
+ h_stride : w x c
309
+ w_stride : c
310
+
311
+ 3)Example of all images in column major order one batch of features after the other (with optional padding on column)
312
+ input_stride : c x w x w_stride
313
+ feature_stride : w x w_stride
314
+ h_stride : 1
315
+ w_stride : >= h
316
+
317
+ */
318
+
319
+ /* Destroy an instance of Tensor4d descriptor */
320
+ cudnnStatus_t CUDNNWINAPI
321
+ cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc);
322
+
323
+ /* Fold/unfold transforms */
324
+ typedef enum {
325
+ CUDNN_TRANSFORM_FOLD = 0U,
326
+ CUDNN_TRANSFORM_UNFOLD = 1U,
327
+ } cudnnFoldingDirection_t;
328
+
329
+ /** Create a destination descriptor for cudnnTransformTensor */
330
+ cudnnStatus_t CUDNNWINAPI
331
+ cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc,
332
+ const cudnnTensorDescriptor_t srcDesc,
333
+ cudnnTensorDescriptor_t destDesc,
334
+ size_t *destSizeInBytes);
335
+
336
+ /** Create an empty tensor transform descriptor */
337
+ cudnnStatus_t CUDNNWINAPI
338
+ cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc);
339
+
340
+ /** Initialize a previously created tensor transform descriptor. */
341
+ cudnnStatus_t CUDNNWINAPI
342
+ cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
343
+ const uint32_t nbDims,
344
+ const cudnnTensorFormat_t destFormat,
345
+ const int32_t padBeforeA[],
346
+ const int32_t padAfterA[],
347
+ const uint32_t foldA[],
348
+ const cudnnFoldingDirection_t direction);
349
+
350
+ /**
351
+ * Retrieves the values stored in a previously initialized tensor transform
352
+ * descriptor.
353
+ */
354
+ cudnnStatus_t CUDNNWINAPI
355
+ cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc,
356
+ uint32_t nbDimsRequested,
357
+ cudnnTensorFormat_t *destFormat,
358
+ int32_t padBeforeA[],
359
+ int32_t padAfterA[],
360
+ uint32_t foldA[],
361
+ cudnnFoldingDirection_t *direction);
362
+
363
+ /**
364
+ * Destroys a previously created tensor transform descriptor.
365
+ */
366
+ cudnnStatus_t CUDNNWINAPI
367
+ cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc);
368
+
369
+ /* Tensor layout conversion helper (y = alpha * x + beta * y) */
370
+ cudnnStatus_t CUDNNWINAPI
371
+ cudnnTransformTensor(cudnnHandle_t handle,
372
+ const void *alpha,
373
+ const cudnnTensorDescriptor_t xDesc,
374
+ const void *x,
375
+ const void *beta,
376
+ const cudnnTensorDescriptor_t yDesc,
377
+ void *y);
378
+
379
+ cudnnStatus_t CUDNNWINAPI
380
+ cudnnTransformTensorEx(cudnnHandle_t handle,
381
+ const cudnnTensorTransformDescriptor_t transDesc,
382
+ const void *alpha,
383
+ const cudnnTensorDescriptor_t srcDesc,
384
+ const void *srcData,
385
+ const void *beta,
386
+ const cudnnTensorDescriptor_t destDesc,
387
+ void *destData);
388
+
389
+ /* Tensor Bias addition : C = alpha * A + beta * C */
390
+ cudnnStatus_t CUDNNWINAPI
391
+ cudnnAddTensor(cudnnHandle_t handle,
392
+ const void *alpha,
393
+ const cudnnTensorDescriptor_t aDesc,
394
+ const void *A,
395
+ const void *beta,
396
+ const cudnnTensorDescriptor_t cDesc,
397
+ void *C);
398
+
399
+ /*
400
+ * CUDNN OpTensor op type
401
+ */
402
+ typedef enum {
403
+ CUDNN_OP_TENSOR_ADD = 0,
404
+ CUDNN_OP_TENSOR_MUL = 1,
405
+ CUDNN_OP_TENSOR_MIN = 2,
406
+ CUDNN_OP_TENSOR_MAX = 3,
407
+ CUDNN_OP_TENSOR_SQRT = 4,
408
+ CUDNN_OP_TENSOR_NOT = 5,
409
+ } cudnnOpTensorOp_t;
410
+
411
+ cudnnStatus_t CUDNNWINAPI
412
+ cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc);
413
+
414
+ cudnnStatus_t CUDNNWINAPI
415
+ cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc,
416
+ cudnnOpTensorOp_t opTensorOp,
417
+ cudnnDataType_t opTensorCompType,
418
+ cudnnNanPropagation_t opTensorNanOpt);
419
+
420
+ cudnnStatus_t CUDNNWINAPI
421
+ cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc,
422
+ cudnnOpTensorOp_t *opTensorOp,
423
+ cudnnDataType_t *opTensorCompType,
424
+ cudnnNanPropagation_t *opTensorNanOpt);
425
+
426
+ cudnnStatus_t CUDNNWINAPI
427
+ cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc);
428
+
429
+ /* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */
430
+ /* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */
431
+ cudnnStatus_t CUDNNWINAPI
432
+ cudnnOpTensor(cudnnHandle_t handle,
433
+ const cudnnOpTensorDescriptor_t opTensorDesc,
434
+ const void *alpha1,
435
+ const cudnnTensorDescriptor_t aDesc,
436
+ const void *A,
437
+ const void *alpha2,
438
+ const cudnnTensorDescriptor_t bDesc,
439
+ const void *B,
440
+ const void *beta,
441
+ const cudnnTensorDescriptor_t cDesc,
442
+ void *C);
443
+
444
+ /*
445
+ * CUDNN ReduceTensor op type
446
+ */
447
+ typedef enum {
448
+ CUDNN_REDUCE_TENSOR_ADD = 0,
449
+ CUDNN_REDUCE_TENSOR_MUL = 1,
450
+ CUDNN_REDUCE_TENSOR_MIN = 2,
451
+ CUDNN_REDUCE_TENSOR_MAX = 3,
452
+ CUDNN_REDUCE_TENSOR_AMAX = 4,
453
+ CUDNN_REDUCE_TENSOR_AVG = 5,
454
+ CUDNN_REDUCE_TENSOR_NORM1 = 6,
455
+ CUDNN_REDUCE_TENSOR_NORM2 = 7,
456
+ CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8,
457
+ } cudnnReduceTensorOp_t;
458
+
459
+ /*
460
+ * CUDNN ReduceTensor indices type
461
+ */
462
+ typedef enum {
463
+ CUDNN_REDUCE_TENSOR_NO_INDICES = 0,
464
+ CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1,
465
+ } cudnnReduceTensorIndices_t;
466
+
467
+ /*
468
+ * CUDNN tensor indices type size (all unsigned)
469
+ * Currently not supported, default is 32 bit unsigned.
470
+ */
471
+ typedef enum {
472
+ CUDNN_32BIT_INDICES = 0,
473
+ CUDNN_64BIT_INDICES = 1,
474
+ CUDNN_16BIT_INDICES = 2,
475
+ CUDNN_8BIT_INDICES = 3,
476
+ } cudnnIndicesType_t;
477
+
478
+ cudnnStatus_t CUDNNWINAPI
479
+ cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc);
480
+
481
+ cudnnStatus_t CUDNNWINAPI
482
+ cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc,
483
+ cudnnReduceTensorOp_t reduceTensorOp,
484
+ cudnnDataType_t reduceTensorCompType,
485
+ cudnnNanPropagation_t reduceTensorNanOpt,
486
+ cudnnReduceTensorIndices_t reduceTensorIndices,
487
+ cudnnIndicesType_t reduceTensorIndicesType);
488
+
489
+ cudnnStatus_t CUDNNWINAPI
490
+ cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc,
491
+ cudnnReduceTensorOp_t *reduceTensorOp,
492
+ cudnnDataType_t *reduceTensorCompType,
493
+ cudnnNanPropagation_t *reduceTensorNanOpt,
494
+ cudnnReduceTensorIndices_t *reduceTensorIndices,
495
+ cudnnIndicesType_t *reduceTensorIndicesType);
496
+
497
+ cudnnStatus_t CUDNNWINAPI
498
+ cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc);
499
+
500
+ /* Helper function to return the minimum size of the index space to be passed to the reduction given the input and
501
+ * output tensors */
502
+ cudnnStatus_t CUDNNWINAPI
503
+ cudnnGetReductionIndicesSize(cudnnHandle_t handle,
504
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
505
+ const cudnnTensorDescriptor_t aDesc,
506
+ const cudnnTensorDescriptor_t cDesc,
507
+ size_t *sizeInBytes);
508
+
509
+ /* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output
510
+ * tensors */
511
+ cudnnStatus_t CUDNNWINAPI
512
+ cudnnGetReductionWorkspaceSize(cudnnHandle_t handle,
513
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
514
+ const cudnnTensorDescriptor_t aDesc,
515
+ const cudnnTensorDescriptor_t cDesc,
516
+ size_t *sizeInBytes);
517
+
518
+ /* Tensor operation : C = reduce op( alpha * A ) + beta * C */
519
+ /* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */
520
+ /* The indices space is ignored for reduce ops other than min or max. */
521
+ cudnnStatus_t CUDNNWINAPI
522
+ cudnnReduceTensor(cudnnHandle_t handle,
523
+ const cudnnReduceTensorDescriptor_t reduceTensorDesc,
524
+ void *indices,
525
+ size_t indicesSizeInBytes,
526
+ void *workspace,
527
+ size_t workspaceSizeInBytes,
528
+ const void *alpha,
529
+ const cudnnTensorDescriptor_t aDesc,
530
+ const void *A,
531
+ const void *beta,
532
+ const cudnnTensorDescriptor_t cDesc,
533
+ void *C);
534
+
535
+ /* Set all values of a tensor to a given value : y[i] = value[0] */
536
+ cudnnStatus_t CUDNNWINAPI
537
+ cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr);
538
+
539
+ /* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */
540
+ cudnnStatus_t CUDNNWINAPI
541
+ cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha);
542
+
543
+ /* Create an instance of FilterStruct */
544
+ cudnnStatus_t CUDNNWINAPI
545
+ cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc);
546
+
547
+ cudnnStatus_t CUDNNWINAPI
548
+ cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc,
549
+ cudnnDataType_t dataType, /* image data type */
550
+ cudnnTensorFormat_t format,
551
+ int k, /* number of output feature maps */
552
+ int c, /* number of input feature maps */
553
+ int h, /* height of each input filter */
554
+ int w); /* width of each input filter */
555
+
556
+ cudnnStatus_t CUDNNWINAPI
557
+ cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc,
558
+ cudnnDataType_t *dataType, /* image data type */
559
+ cudnnTensorFormat_t *format,
560
+ int *k, /* number of output feature maps */
561
+ int *c, /* number of input feature maps */
562
+ int *h, /* height of each input filter */
563
+ int *w); /* width of each input filter */
564
+
565
+ cudnnStatus_t CUDNNWINAPI
566
+ cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc,
567
+ cudnnDataType_t dataType, /* image data type */
568
+ cudnnTensorFormat_t format,
569
+ int nbDims,
570
+ const int filterDimA[]);
571
+
572
+ cudnnStatus_t CUDNNWINAPI
573
+ cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc,
574
+ int nbDimsRequested,
575
+ cudnnDataType_t *dataType, /* image data type */
576
+ cudnnTensorFormat_t *format,
577
+ int *nbDims,
578
+ int filterDimA[]);
579
+ cudnnStatus_t CUDNNWINAPI
580
+ cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size);
581
+
582
+ cudnnStatus_t CUDNNWINAPI
583
+ cudnnTransformFilter(cudnnHandle_t handle,
584
+ const cudnnTensorTransformDescriptor_t transDesc,
585
+ const void *alpha,
586
+ const cudnnFilterDescriptor_t srcDesc,
587
+ const void *srcData,
588
+ const void *beta,
589
+ const cudnnFilterDescriptor_t destDesc,
590
+ void *destData);
591
+
592
+ cudnnStatus_t CUDNNWINAPI
593
+ cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc);
594
+
595
+ /*
596
+ * softmax algorithm
597
+ */
598
+ typedef enum {
599
+ CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */
600
+ CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */
601
+ CUDNN_SOFTMAX_LOG = 2
602
+ } cudnnSoftmaxAlgorithm_t;
603
+
604
+ typedef enum {
605
+ CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */
606
+ CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */
607
+ } cudnnSoftmaxMode_t;
608
+
609
+ /* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */
610
+
611
+ /* Function to perform forward softmax */
612
+ cudnnStatus_t CUDNNWINAPI
613
+ cudnnSoftmaxForward(cudnnHandle_t handle,
614
+ cudnnSoftmaxAlgorithm_t algo,
615
+ cudnnSoftmaxMode_t mode,
616
+ const void *alpha,
617
+ const cudnnTensorDescriptor_t xDesc,
618
+ const void *x,
619
+ const void *beta,
620
+ const cudnnTensorDescriptor_t yDesc,
621
+ void *y);
622
+
623
+ /*
624
+ * pooling mode
625
+ */
626
+ typedef enum {
627
+ CUDNN_POOLING_MAX = 0,
628
+ CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */
629
+ CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */
630
+ CUDNN_POOLING_MAX_DETERMINISTIC = 3
631
+ } cudnnPoolingMode_t;
632
+
633
+ /* Create an instance of pooling descriptor */
634
+ cudnnStatus_t CUDNNWINAPI
635
+ cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc);
636
+
637
+ cudnnStatus_t CUDNNWINAPI
638
+ cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc,
639
+ cudnnPoolingMode_t mode,
640
+ cudnnNanPropagation_t maxpoolingNanOpt,
641
+ int windowHeight,
642
+ int windowWidth,
643
+ int verticalPadding,
644
+ int horizontalPadding,
645
+ int verticalStride,
646
+ int horizontalStride);
647
+
648
+ cudnnStatus_t CUDNNWINAPI
649
+ cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
650
+ cudnnPoolingMode_t *mode,
651
+ cudnnNanPropagation_t *maxpoolingNanOpt,
652
+ int *windowHeight,
653
+ int *windowWidth,
654
+ int *verticalPadding,
655
+ int *horizontalPadding,
656
+ int *verticalStride,
657
+ int *horizontalStride);
658
+
659
+ cudnnStatus_t CUDNNWINAPI
660
+ cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc,
661
+ const cudnnPoolingMode_t mode,
662
+ const cudnnNanPropagation_t maxpoolingNanOpt,
663
+ int nbDims,
664
+ const int windowDimA[],
665
+ const int paddingA[],
666
+ const int strideA[]);
667
+
668
+ cudnnStatus_t CUDNNWINAPI
669
+ cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc,
670
+ int nbDimsRequested,
671
+ cudnnPoolingMode_t *mode,
672
+ cudnnNanPropagation_t *maxpoolingNanOpt,
673
+ int *nbDims,
674
+ int windowDimA[],
675
+ int paddingA[],
676
+ int strideA[]);
677
+
678
+ cudnnStatus_t CUDNNWINAPI
679
+ cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
680
+ const cudnnTensorDescriptor_t inputTensorDesc,
681
+ int nbDims,
682
+ int outputTensorDimA[]);
683
+
684
+ cudnnStatus_t CUDNNWINAPI
685
+ cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc,
686
+ const cudnnTensorDescriptor_t inputTensorDesc,
687
+ int *n,
688
+ int *c,
689
+ int *h,
690
+ int *w);
691
+
692
+ /* Destroy an instance of pooling descriptor */
693
+ cudnnStatus_t CUDNNWINAPI
694
+ cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc);
695
+
696
+ /* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */
697
+
698
+ /* Function to perform forward pooling */
699
+ cudnnStatus_t CUDNNWINAPI
700
+ cudnnPoolingForward(cudnnHandle_t handle,
701
+ const cudnnPoolingDescriptor_t poolingDesc,
702
+ const void *alpha,
703
+ const cudnnTensorDescriptor_t xDesc,
704
+ const void *x,
705
+ const void *beta,
706
+ const cudnnTensorDescriptor_t yDesc,
707
+ void *y);
708
+
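A minimal sketch of how the pooling descriptor, the output-dimension query and the forward call above fit together, assuming a handle, device buffers d_x/d_y (d_y sized for the pooled output) and the tensor-descriptor helpers from earlier in this header; error checks omitted.

#include <cudnn.h>

/* Sketch: 2x2, stride-2 max pooling of an NCHW float tensor. */
static void maxpool_2x2(cudnnHandle_t handle, const float *d_x, float *d_y,
                        int n, int c, int h, int w) {
    cudnnPoolingDescriptor_t pool;
    cudnnTensorDescriptor_t xDesc, yDesc;
    int on, oc, oh, ow;
    const float alpha = 1.0f, beta = 0.0f;

    cudnnCreatePoolingDescriptor(&pool);
    cudnnSetPooling2dDescriptor(pool, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
                                2, 2,   /* window h, w  */
                                0, 0,   /* padding h, w */
                                2, 2);  /* stride h, w  */

    cudnnCreateTensorDescriptor(&xDesc);
    cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w);

    /* Let cuDNN report the output shape instead of computing it by hand. */
    cudnnGetPooling2dForwardOutputDim(pool, xDesc, &on, &oc, &oh, &ow);

    cudnnCreateTensorDescriptor(&yDesc);
    cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, on, oc, oh, ow);

    cudnnPoolingForward(handle, pool, &alpha, xDesc, d_x, &beta, yDesc, d_y);

    cudnnDestroyTensorDescriptor(yDesc);
    cudnnDestroyTensorDescriptor(xDesc);
    cudnnDestroyPoolingDescriptor(pool);
}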
709
+ /*
710
+ * activation mode
711
+ */
712
+ typedef enum {
713
+ CUDNN_ACTIVATION_SIGMOID = 0,
714
+ CUDNN_ACTIVATION_RELU = 1,
715
+ CUDNN_ACTIVATION_TANH = 2,
716
+ CUDNN_ACTIVATION_CLIPPED_RELU = 3,
717
+ CUDNN_ACTIVATION_ELU = 4,
718
+ CUDNN_ACTIVATION_IDENTITY = 5,
719
+ CUDNN_ACTIVATION_SWISH = 6
720
+ } cudnnActivationMode_t;
721
+
722
+ /* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */
723
+ cudnnStatus_t CUDNNWINAPI
724
+ cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc);
725
+
726
+ cudnnStatus_t CUDNNWINAPI
727
+ cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc,
728
+ cudnnActivationMode_t mode,
729
+ cudnnNanPropagation_t reluNanOpt,
730
+ double coef); /* ceiling for clipped RELU, alpha for ELU */
731
+
732
+ cudnnStatus_t CUDNNWINAPI
733
+ cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc,
734
+ cudnnActivationMode_t *mode,
735
+ cudnnNanPropagation_t *reluNanOpt,
736
+ double *coef); /* ceiling for clipped RELU, alpha for ELU */
737
+
738
+ cudnnStatus_t CUDNNWINAPI
739
+ cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double swish_beta);
740
+
741
+ cudnnStatus_t CUDNNWINAPI
742
+ cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta);
743
+
744
+ cudnnStatus_t CUDNNWINAPI
745
+ cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc);
746
+
747
+ /* Function to perform forward activation */
748
+ cudnnStatus_t CUDNNWINAPI
749
+ cudnnActivationForward(cudnnHandle_t handle,
750
+ cudnnActivationDescriptor_t activationDesc,
751
+ const void *alpha,
752
+ const cudnnTensorDescriptor_t xDesc,
753
+ const void *x,
754
+ const void *beta,
755
+ const cudnnTensorDescriptor_t yDesc,
756
+ void *y);
757
+
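For illustration only, a short sketch of the activation path above (descriptor setup plus forward call) for a plain ReLU; the handle, the device buffers and the tensor-descriptor helpers from earlier in this header are assumed.

#include <cudnn.h>

/* Sketch: ReLU over an NCHW float tensor, y = relu(x). */
static void relu_forward(cudnnHandle_t handle, const float *d_x, float *d_y,
                         int n, int c, int h, int w) {
    cudnnActivationDescriptor_t act;
    cudnnTensorDescriptor_t desc;
    const float alpha = 1.0f, beta = 0.0f;

    cudnnCreateActivationDescriptor(&act);
    /* coef is unused for plain RELU; it is the ceiling for CLIPPED_RELU or alpha for ELU. */
    cudnnSetActivationDescriptor(act, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0);

    cudnnCreateTensorDescriptor(&desc);
    cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w);

    cudnnActivationForward(handle, act, &alpha, desc, d_x, &beta, desc, d_y);

    cudnnDestroyTensorDescriptor(desc);
    cudnnDestroyActivationDescriptor(act);
}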
758
+ /*
759
+ * Create an instance of LRN (Local Response Normalization) descriptor
760
+ * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper
761
+ */
762
+ cudnnStatus_t CUDNNWINAPI
763
+ cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc);
764
+
765
+ #define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */
766
+ #define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */
767
+ #define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */
768
+ #define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */
769
+
770
+ /* LRN layer mode */
771
+ typedef enum {
772
+ CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */
773
+ } cudnnLRNMode_t;
774
+
775
+ /*
776
+ * Uses a window [center-lookBehind, center+lookAhead], where
777
+ * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1.
778
+ * Values of double parameters cast to tensor data type.
779
+ */
780
+ cudnnStatus_t CUDNNWINAPI
781
+ cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK);
782
+ /*
783
+ * Retrieve the settings currently stored in an LRN layer descriptor
784
+ * Any of the provided pointers can be NULL (no corresponding value will be returned)
785
+ */
786
+ cudnnStatus_t CUDNNWINAPI
787
+ cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK);
788
+
789
+ /* Destroy an instance of LRN descriptor */
790
+ cudnnStatus_t CUDNNWINAPI
791
+ cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc);
792
+
793
+ /* LRN functions: output = alpha * normalize(x) + beta * old_y */
794
+
795
+ /* LRN cross-channel forward computation. Double parameters cast to tensor data type */
796
+ cudnnStatus_t CUDNNWINAPI
797
+ cudnnLRNCrossChannelForward(cudnnHandle_t handle,
798
+ cudnnLRNDescriptor_t normDesc,
799
+ cudnnLRNMode_t lrnMode,
800
+ const void *alpha,
801
+ const cudnnTensorDescriptor_t xDesc,
802
+ const void *x,
803
+ const void *beta,
804
+ const cudnnTensorDescriptor_t yDesc,
805
+ void *y);
806
+
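A hedged sketch of cross-channel LRN using the Krizhevsky'12 defaults quoted in the comment above; the handle, device buffers and tensor-descriptor helpers are assumed and error checks are omitted.

#include <cudnn.h>

/* Sketch: LRN forward with lrnN=5, alpha=1e-4, beta=0.75, k=2.0. */
static void lrn_forward(cudnnHandle_t handle, const float *d_x, float *d_y,
                        int n, int c, int h, int w) {
    cudnnLRNDescriptor_t lrn;
    cudnnTensorDescriptor_t desc;
    const float alpha = 1.0f, beta = 0.0f;   /* blend factors, not the LRN parameters */

    cudnnCreateLRNDescriptor(&lrn);
    /* lrnN must stay within [CUDNN_LRN_MIN_N, CUDNN_LRN_MAX_N]. */
    cudnnSetLRNDescriptor(lrn, 5 /*lrnN*/, 1e-4 /*lrnAlpha*/, 0.75 /*lrnBeta*/, 2.0 /*lrnK*/);

    cudnnCreateTensorDescriptor(&desc);
    cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w);

    cudnnLRNCrossChannelForward(handle, lrn, CUDNN_LRN_CROSS_CHANNEL_DIM1,
                                &alpha, desc, d_x, &beta, desc, d_y);

    cudnnDestroyTensorDescriptor(desc);
    cudnnDestroyLRNDescriptor(lrn);
}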
807
+ typedef enum {
808
+ CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0,
809
+ } cudnnDivNormMode_t;
810
+
811
+ /* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */
812
+ cudnnStatus_t CUDNNWINAPI
813
+ cudnnDivisiveNormalizationForward(cudnnHandle_t handle,
814
+ cudnnLRNDescriptor_t normDesc,
815
+ cudnnDivNormMode_t mode,
816
+ const void *alpha,
817
+ const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */
818
+ const void *x,
819
+ const void *means, /* if NULL, means are assumed to be zero */
820
+ void *temp,
821
+ void *temp2,
822
+ const void *beta,
823
+ const cudnnTensorDescriptor_t yDesc,
824
+ void *y);
825
+
826
+ typedef enum {
827
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
828
+ CUDNN_BATCHNORM_PER_ACTIVATION = 0,
829
+
830
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
831
+ CUDNN_BATCHNORM_SPATIAL = 1,
832
+
833
+ /*
834
+ * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors).
835
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values
836
+ */
837
+ CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2,
838
+ } cudnnBatchNormMode_t;
839
+
840
+ #define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */
841
+
842
+ /*
843
+ * Derives a tensor descriptor from layer data descriptor for BatchNormalization
844
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
845
+ * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions.
846
+ */
847
+ cudnnStatus_t CUDNNWINAPI
848
+ cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc,
849
+ const cudnnTensorDescriptor_t xDesc,
850
+ cudnnBatchNormMode_t mode);
851
+
852
+ typedef enum {
853
+ CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */
854
+ CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */
855
+ CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */
856
+ } cudnnBatchNormOps_t;
857
+
858
+ /*
859
+ * Performs Batch Normalization during Inference:
860
+ * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k]
861
+ * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed
862
+ * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining
863
+ * above for notes on function arguments.
864
+ */
865
+ cudnnStatus_t CUDNNWINAPI
866
+ cudnnBatchNormalizationForwardInference(cudnnHandle_t handle,
867
+ cudnnBatchNormMode_t mode,
868
+ const void *alpha, /* alpha[0] = result blend factor */
869
+ const void *beta, /* beta[0] = dest layer blend factor */
870
+ const cudnnTensorDescriptor_t xDesc,
871
+ const void *x, /* NxCxHxW */
872
+ const cudnnTensorDescriptor_t yDesc,
873
+ void *y, /* NxCxHxW */
874
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
875
+ const void *bnScale,
876
+ const void *bnBias,
877
+ const void *estimatedMean,
878
+ const void *estimatedVariance,
879
+ double epsilon);
880
+
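A minimal sketch of the inference formula above in use: derive the 1xCx1x1 parameter descriptor from the data descriptor, then apply the learned per-channel scale, bias and running statistics. The handle, the NCHW descriptors/buffers and the per-channel device arrays (each of length C) are assumed to exist.

#include <cudnn.h>

/* Sketch: spatial batch-norm inference with precomputed statistics. */
static void bn_inference(cudnnHandle_t handle,
                         cudnnTensorDescriptor_t xDesc, const float *d_x,
                         cudnnTensorDescriptor_t yDesc, float *d_y,
                         const float *d_scale, const float *d_bias,
                         const float *d_mean, const float *d_var) {
    cudnnTensorDescriptor_t bnDesc;
    const float alpha = 1.0f, beta = 0.0f;
    const double epsilon = 1e-5;   /* must be >= CUDNN_BN_MIN_EPSILON */

    cudnnCreateTensorDescriptor(&bnDesc);
    /* Derive the parameter descriptor (1xCx1x1 in spatial mode) from the data descriptor. */
    cudnnDeriveBNTensorDescriptor(bnDesc, xDesc, CUDNN_BATCHNORM_SPATIAL);

    cudnnBatchNormalizationForwardInference(handle, CUDNN_BATCHNORM_SPATIAL,
                                            &alpha, &beta,
                                            xDesc, d_x, yDesc, d_y,
                                            bnDesc, d_scale, d_bias,
                                            d_mean, d_var, epsilon);

    cudnnDestroyTensorDescriptor(bnDesc);
}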
881
+ typedef enum {
882
+ /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */
883
+ CUDNN_NORM_PER_ACTIVATION = 0,
884
+
885
+ /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */
886
+ CUDNN_NORM_PER_CHANNEL = 1,
887
+ } cudnnNormMode_t;
888
+
889
+ typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t;
890
+
891
+ /*
892
+ * Derives a tensor descriptor from layer data descriptor for Normalization
893
+ * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for
894
+ * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions.
895
+ */
896
+ cudnnStatus_t CUDNNWINAPI
897
+ cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc,
898
+ cudnnTensorDescriptor_t derivedNormMeanVarDesc,
899
+ const cudnnTensorDescriptor_t xDesc,
900
+ cudnnNormMode_t mode,
901
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
902
+
903
+ typedef enum {
904
+ CUDNN_NORM_OPS_NORM = 0, /* do normalization only */
905
+ CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */
906
+ CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */
907
+ } cudnnNormOps_t;
908
+
909
+ /*
910
+ * Performs Normalization during Inference:
911
+ * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k]
912
+ * with normScale, normBias, runningMean, runningInvVariance tensors indexed
913
+ * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining
914
+ * above for notes on function arguments.
915
+ */
916
+ cudnnStatus_t CUDNNWINAPI
917
+ cudnnNormalizationForwardInference(cudnnHandle_t handle,
918
+ cudnnNormMode_t mode,
919
+ cudnnNormOps_t normOps,
920
+ cudnnNormAlgo_t algo,
921
+ const void *alpha, /* alpha[0] = result blend factor */
922
+ const void *beta, /* beta[0] = dest layer blend factor */
923
+ const cudnnTensorDescriptor_t xDesc,
924
+ const void *x, /* NxCxHxW */
925
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
926
+ const void *normScale,
927
+ const void *normBias,
928
+ const cudnnTensorDescriptor_t normMeanVarDesc,
929
+ const void *estimatedMean,
930
+ const void *estimatedVariance,
931
+ const cudnnTensorDescriptor_t zDesc,
932
+ const void *z,
933
+ cudnnActivationDescriptor_t activationDesc,
934
+ const cudnnTensorDescriptor_t yDesc,
935
+ void *y, /* NxCxHxW */
936
+ double epsilon,
937
+ int groupCnt); /* Placeholder for future work */
938
+
939
+ /* APIs for spatial transformer network*/
940
+ typedef enum {
941
+ CUDNN_SAMPLER_BILINEAR = 0,
942
+ } cudnnSamplerType_t;
943
+
944
+ cudnnStatus_t CUDNNWINAPI
945
+ cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc);
946
+
947
+ cudnnStatus_t CUDNNWINAPI
948
+ cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc,
949
+ cudnnSamplerType_t samplerType,
950
+ cudnnDataType_t dataType,
951
+ const int nbDims,
952
+ const int dimA[]);
953
+
954
+ cudnnStatus_t CUDNNWINAPI
955
+ cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc);
956
+
957
+ cudnnStatus_t CUDNNWINAPI
958
+ cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle,
959
+ const cudnnSpatialTransformerDescriptor_t stDesc,
960
+ const void *theta,
961
+ void *grid);
962
+
963
+ cudnnStatus_t CUDNNWINAPI
964
+ cudnnSpatialTfSamplerForward(cudnnHandle_t handle,
965
+ cudnnSpatialTransformerDescriptor_t stDesc,
966
+ const void *alpha,
967
+ const cudnnTensorDescriptor_t xDesc,
968
+ const void *x,
969
+ const void *grid,
970
+ const void *beta,
971
+ cudnnTensorDescriptor_t yDesc,
972
+ void *y);
973
+
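A hedged sketch of the spatial-transformer pair above: generate a bilinear sampling grid from per-image affine parameters, then sample the input through it. The handle, input/output descriptors and buffers, and a device buffer d_theta holding N 2x3 affine matrices are assumptions of this sketch; error checks are omitted.

#include <cudnn.h>
#include <cuda_runtime.h>

/* Sketch: affine grid generation + bilinear sampling of an NCHW float tensor. */
static void spatial_transform(cudnnHandle_t handle,
                              cudnnTensorDescriptor_t xDesc, const float *d_x,
                              cudnnTensorDescriptor_t yDesc, float *d_y,
                              const float *d_theta,
                              int n, int c, int h, int w) {
    cudnnSpatialTransformerDescriptor_t st;
    const int dimA[4] = {n, c, h, w};   /* shape of the sampled output */
    const float alpha = 1.0f, beta = 0.0f;
    float *d_grid = NULL;

    cudnnCreateSpatialTransformerDescriptor(&st);
    cudnnSetSpatialTransformerNdDescriptor(st, CUDNN_SAMPLER_BILINEAR,
                                           CUDNN_DATA_FLOAT, 4, dimA);

    /* The grid stores an (x, y) source coordinate for every output position: N*H*W*2 floats. */
    cudaMalloc((void **)&d_grid, (size_t)n * h * w * 2 * sizeof(float));
    cudnnSpatialTfGridGeneratorForward(handle, st, d_theta, d_grid);
    cudnnSpatialTfSamplerForward(handle, st, &alpha, xDesc, d_x, d_grid,
                                 &beta, yDesc, d_y);

    cudaFree(d_grid);
    cudnnDestroySpatialTransformerDescriptor(st);
}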
974
+ typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t;
975
+
976
+ cudnnStatus_t CUDNNWINAPI
977
+ cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc);
978
+
979
+ cudnnStatus_t CUDNNWINAPI
980
+ cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc);
981
+
982
+ /*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */
983
+ cudnnStatus_t CUDNNWINAPI
984
+ cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes);
985
+
986
+ /*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */
987
+ cudnnStatus_t CUDNNWINAPI
988
+ cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes);
989
+
990
+ cudnnStatus_t CUDNNWINAPI
991
+ cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
992
+ cudnnHandle_t handle,
993
+ float dropout,
994
+ void *states,
995
+ size_t stateSizeInBytes,
996
+ unsigned long long seed);
997
+
998
+ /* Restores the dropout descriptor to a previously saved-off state */
999
+ cudnnStatus_t CUDNNWINAPI
1000
+ cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1001
+ cudnnHandle_t handle,
1002
+ float dropout,
1003
+ void *states,
1004
+ size_t stateSizeInBytes,
1005
+ unsigned long long seed);
1006
+
1007
+ cudnnStatus_t CUDNNWINAPI
1008
+ cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc,
1009
+ cudnnHandle_t handle,
1010
+ float *dropout,
1011
+ void **states,
1012
+ unsigned long long *seed);
1013
+
1014
+ cudnnStatus_t CUDNNWINAPI
1015
+ cudnnDropoutForward(cudnnHandle_t handle,
1016
+ const cudnnDropoutDescriptor_t dropoutDesc,
1017
+ const cudnnTensorDescriptor_t xdesc,
1018
+ const void *x,
1019
+ const cudnnTensorDescriptor_t ydesc,
1020
+ void *y,
1021
+ void *reserveSpace,
1022
+ size_t reserveSpaceSizeInBytes);
1023
+
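A sketch of how the dropout helpers above are typically combined: size and allocate the RNG state buffer owned by the descriptor, size the reserve space from the tensor descriptor, then run the forward pass. The handle, descriptors and device data buffers are assumed; in training, the reserve space and descriptor must be kept alive for the matching cudnnDropoutBackward call declared in cudnn_ops_train.h.

#include <cudnn.h>
#include <cuda_runtime.h>

/* Sketch: forward-only dropout with p = 0.5; error checks omitted. */
static void dropout_forward(cudnnHandle_t handle,
                            cudnnTensorDescriptor_t xDesc, const float *d_x,
                            cudnnTensorDescriptor_t yDesc, float *d_y) {
    cudnnDropoutDescriptor_t drop;
    size_t stateBytes = 0, reserveBytes = 0;
    void *d_states = NULL, *d_reserve = NULL;

    cudnnCreateDropoutDescriptor(&drop);
    cudnnDropoutGetStatesSize(handle, &stateBytes);
    cudaMalloc(&d_states, stateBytes);
    cudnnSetDropoutDescriptor(drop, handle, 0.5f /* drop probability */,
                              d_states, stateBytes, 1234ULL /* seed */);

    cudnnDropoutGetReserveSpaceSize(xDesc, &reserveBytes);
    cudaMalloc(&d_reserve, reserveBytes);

    cudnnDropoutForward(handle, drop, xDesc, d_x, yDesc, d_y, d_reserve, reserveBytes);

    /* Freed here only because this sketch never runs the backward pass. */
    cudaFree(d_reserve);
    cudaFree(d_states);
    cudnnDestroyDropoutDescriptor(drop);
}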
1024
+ /* TODO: remove */
1025
+
1026
+ typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t;
1027
+ typedef struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t;
1028
+
1029
+ /* TODO: move these enums out to the appropriate submodule */
1030
+ typedef enum {
1031
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0,
1032
+ CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1,
1033
+ CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2,
1034
+ CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3,
1035
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4,
1036
+ CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5,
1037
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6,
1038
+ CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7,
1039
+ CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8
1040
+ } cudnnConvolutionFwdAlgo_t;
1041
+
1042
+ typedef enum {
1043
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */
1044
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1,
1045
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2,
1046
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */
1047
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */
1048
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5,
1049
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6,
1050
+ CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7
1051
+ } cudnnConvolutionBwdFilterAlgo_t;
1052
+
1053
+ typedef enum {
1054
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */
1055
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1,
1056
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2,
1057
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3,
1058
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4,
1059
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5,
1060
+ CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6
1061
+ } cudnnConvolutionBwdDataAlgo_t;
1062
+
1063
+ typedef enum {
1064
+ CUDNN_RNN_ALGO_STANDARD = 0,
1065
+ CUDNN_RNN_ALGO_PERSIST_STATIC = 1,
1066
+ CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2,
1067
+ CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3,
1068
+ CUDNN_RNN_ALGO_COUNT = 4,
1069
+ } cudnnRNNAlgo_t;
1070
+
1071
+ typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t;
1072
+
1073
+ /* TODO: remove */
1074
+ typedef struct cudnnAlgorithmUnionStruct {
1075
+ union Algorithm {
1076
+ cudnnConvolutionFwdAlgo_t convFwdAlgo;
1077
+ cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo;
1078
+ cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo;
1079
+ cudnnRNNAlgo_t RNNAlgo;
1080
+ cudnnCTCLossAlgo_t CTCLossAlgo;
1081
+ } algo;
1082
+ } cudnnAlgorithm_t;
1083
+
1084
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1085
+ cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc);
1086
+
1087
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1088
+ cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm);
1089
+
1090
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1091
+ cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm);
1092
+
1093
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1094
+ cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest);
1095
+
1096
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1097
+ cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc);
1098
+
1099
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1100
+ cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate);
1101
+
1102
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1103
+ cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf,
1104
+ cudnnAlgorithmDescriptor_t algoDesc,
1105
+ cudnnStatus_t status,
1106
+ float time,
1107
+ size_t memory);
1108
+
1109
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1110
+ cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf,
1111
+ cudnnAlgorithmDescriptor_t *algoDesc,
1112
+ cudnnStatus_t *status,
1113
+ float *time,
1114
+ size_t *memory);
1115
+
1116
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1117
+ cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy);
1118
+
1119
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1120
+ cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes);
1121
+
1122
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1123
+ cudnnSaveAlgorithm(cudnnHandle_t handle,
1124
+ cudnnAlgorithmDescriptor_t algoDesc,
1125
+ void *algoSpace,
1126
+ size_t algoSpaceSizeInBytes);
1127
+
1128
+ CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI
1129
+ cudnnRestoreAlgorithm(cudnnHandle_t handle,
1130
+ void *algoSpace,
1131
+ size_t algoSpaceSizeInBytes,
1132
+ cudnnAlgorithmDescriptor_t algoDesc);
1133
+
1134
+ typedef enum {
1135
+ CUDNN_SEV_FATAL = 0,
1136
+ CUDNN_SEV_ERROR = 1,
1137
+ CUDNN_SEV_WARNING = 2,
1138
+ CUDNN_SEV_INFO = 3,
1139
+ } cudnnSeverity_t;
1140
+
1141
+ /* Message masks to be used with cudnnSetCallback() */
1142
+ #define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR)
1143
+ #define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING)
1144
+ #define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO)
1145
+
1146
+ /* struct containing useful information for each API call */
1147
+ typedef struct cudnnDebugStruct {
1148
+ unsigned cudnn_version;
1149
+ cudnnStatus_t cudnnStatus;
1150
+ unsigned time_sec; /* epoch time in seconds */
1151
+ unsigned time_usec; /* microseconds part of epoch time */
1152
+ unsigned time_delta; /* time since start in seconds */
1153
+ cudnnHandle_t handle; /* cudnn handle */
1154
+ cudaStream_t stream; /* cuda stream ID */
1155
+ unsigned long long pid; /* process ID */
1156
+ unsigned long long tid; /* thread ID */
1157
+ int cudaDeviceId; /* CUDA device ID */
1158
+ int reserved[15]; /* reserved for future use */
1159
+ } cudnnDebug_t;
1160
+
1161
+ typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg);
1162
+
1163
+ cudnnStatus_t CUDNNWINAPI
1164
+ cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr);
1165
+
1166
+ cudnnStatus_t CUDNNWINAPI
1167
+ cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr);
1168
+
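As a small illustrative sketch of the callback machinery above (an assumption-laden example, not a guarantee of when messages are emitted), a logger matching cudnnCallback_t can be registered with a severity mask built from the CUDNN_SEV_*_EN defines:

#include <cudnn.h>
#include <stdio.h>

/* The callback signature must match cudnnCallback_t exactly. */
static void my_cudnn_logger(cudnnSeverity_t sev, void *udata,
                            const cudnnDebug_t *dbg, const char *msg) {
    (void)udata;
    fprintf(stderr, "[cudnn sev=%d status=%d] %s\n",
            (int)sev, (int)dbg->cudnnStatus, msg);
}

static void install_logger(void) {
    /* Only ERROR and WARNING messages are enabled by this mask. */
    cudnnSetCallback(CUDNN_SEV_ERROR_EN | CUDNN_SEV_WARNING_EN, NULL, my_cudnn_logger);
}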
1169
+ /*
1170
+ * \brief Cross-library version checker.
1171
+ * This function is implemented differently in each sub-library. Each sublib
1172
+ * checks whether its own version matches that of its dependencies.
1173
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
1174
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
1175
+ */
1176
+ cudnnStatus_t CUDNNWINAPI
1177
+ cudnnOpsInferVersionCheck(void);
1178
+
1179
+ #if defined(__cplusplus)
1180
+ }
1181
+ #endif
1182
+
1183
+ #endif /* CUDNN_OPS_INFER_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h ADDED
@@ -0,0 +1,501 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_train : cuDNN's basic training operations and algorithms.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_TRAIN_H_)
55
+ #define CUDNN_OPS_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_OPS_TRAIN_MAJOR 8
65
+ #define CUDNN_OPS_TRAIN_MINOR 9
66
+ #define CUDNN_OPS_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN OPS TRAIN!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* Function to perform backward softmax */
78
+ cudnnStatus_t CUDNNWINAPI
79
+ cudnnSoftmaxBackward(cudnnHandle_t handle,
80
+ cudnnSoftmaxAlgorithm_t algo,
81
+ cudnnSoftmaxMode_t mode,
82
+ const void *alpha,
83
+ const cudnnTensorDescriptor_t yDesc,
84
+ const void *y,
85
+ const cudnnTensorDescriptor_t dyDesc,
86
+ const void *dy,
87
+ const void *beta,
88
+ const cudnnTensorDescriptor_t dxDesc,
89
+ void *dx);
90
+
91
+ /* Function to perform backward pooling */
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnPoolingBackward(cudnnHandle_t handle,
94
+ const cudnnPoolingDescriptor_t poolingDesc,
95
+ const void *alpha,
96
+ const cudnnTensorDescriptor_t yDesc,
97
+ const void *y,
98
+ const cudnnTensorDescriptor_t dyDesc,
99
+ const void *dy,
100
+ const cudnnTensorDescriptor_t xDesc,
101
+ const void *x,
102
+ const void *beta,
103
+ const cudnnTensorDescriptor_t dxDesc,
104
+ void *dx);
105
+
106
+ /* Function to perform backward activation */
107
+ cudnnStatus_t CUDNNWINAPI
108
+ cudnnActivationBackward(cudnnHandle_t handle,
109
+ cudnnActivationDescriptor_t activationDesc,
110
+ const void *alpha,
111
+ const cudnnTensorDescriptor_t yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t xDesc,
116
+ const void *x,
117
+ const void *beta,
118
+ const cudnnTensorDescriptor_t dxDesc,
119
+ void *dx);
120
+
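A hedged sketch of how such a backward entry point pairs with its forward counterpart from cudnn_ops_infer.h during training: the forward output y is kept and fed, together with the incoming gradient dy and the original input x, into the backward call. A shared descriptor (identical shapes), the handle and five device buffers are assumed.

#include <cudnn.h>

/* Sketch: ReLU forward followed by its backward pass; error checks omitted. */
static void relu_fwd_bwd(cudnnHandle_t handle, cudnnTensorDescriptor_t desc,
                         const float *d_x, float *d_y,
                         const float *d_dy, float *d_dx) {
    cudnnActivationDescriptor_t act;
    const float alpha = 1.0f, beta = 0.0f;

    cudnnCreateActivationDescriptor(&act);
    cudnnSetActivationDescriptor(act, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0);

    /* Forward: y = relu(x). Keep y; the backward pass consumes it. */
    cudnnActivationForward(handle, act, &alpha, desc, d_x, &beta, desc, d_y);

    /* Backward: dx from y, dy and x, in the argument order declared above. */
    cudnnActivationBackward(handle, act, &alpha, desc, d_y, desc, d_dy,
                            desc, d_x, &beta, desc, d_dx);

    cudnnDestroyActivationDescriptor(act);
}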
121
+ /* LRN cross-channel backward computation. Double parameters cast to tensor data type */
122
+ cudnnStatus_t CUDNNWINAPI
123
+ cudnnLRNCrossChannelBackward(cudnnHandle_t handle,
124
+ cudnnLRNDescriptor_t normDesc,
125
+ cudnnLRNMode_t lrnMode,
126
+ const void *alpha,
127
+ const cudnnTensorDescriptor_t yDesc,
128
+ const void *y,
129
+ const cudnnTensorDescriptor_t dyDesc,
130
+ const void *dy,
131
+ const cudnnTensorDescriptor_t xDesc,
132
+ const void *x,
133
+ const void *beta,
134
+ const cudnnTensorDescriptor_t dxDesc,
135
+ void *dx);
136
+
137
+ cudnnStatus_t CUDNNWINAPI
138
+ cudnnDivisiveNormalizationBackward(cudnnHandle_t handle,
139
+ cudnnLRNDescriptor_t normDesc,
140
+ cudnnDivNormMode_t mode,
141
+ const void *alpha,
142
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */
143
+ const void *x,
144
+ const void *means, /* if NULL, means are assumed to be zero */
145
+ const void *dy,
146
+ void *temp,
147
+ void *temp2,
148
+ const void *beta,
149
+ const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */
150
+ void *dx, /* output x differential */
151
+ void *dMeans); /* output means differential, can be NULL */
152
+
153
+ cudnnStatus_t CUDNNWINAPI
154
+ cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle,
155
+ cudnnBatchNormMode_t mode,
156
+ cudnnBatchNormOps_t bnOps,
157
+ const cudnnTensorDescriptor_t xDesc,
158
+ const cudnnTensorDescriptor_t zDesc,
159
+ const cudnnTensorDescriptor_t yDesc,
160
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
161
+ const cudnnActivationDescriptor_t activationDesc,
162
+ size_t *sizeInBytes);
163
+
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle,
166
+ cudnnBatchNormMode_t mode,
167
+ cudnnBatchNormOps_t bnOps,
168
+ const cudnnTensorDescriptor_t xDesc,
169
+ const cudnnTensorDescriptor_t yDesc,
170
+ const cudnnTensorDescriptor_t dyDesc,
171
+ const cudnnTensorDescriptor_t dzDesc,
172
+ const cudnnTensorDescriptor_t dxDesc,
173
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
174
+ const cudnnActivationDescriptor_t activationDesc,
175
+ size_t *sizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle,
179
+ cudnnBatchNormMode_t mode,
180
+ cudnnBatchNormOps_t bnOps,
181
+ const cudnnActivationDescriptor_t activationDesc,
182
+ const cudnnTensorDescriptor_t xDesc,
183
+ size_t *sizeInBytes);
184
+
185
+ /* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnBatchNormalizationForwardTraining(
188
+ cudnnHandle_t handle,
189
+ cudnnBatchNormMode_t mode,
190
+
191
+ const void *alpha, /* alpha[0] = result blend factor */
192
+ const void *beta, /* beta[0] = dest layer blend factor */
193
+
194
+ const cudnnTensorDescriptor_t xDesc,
195
+ const void *x, /* NxCxHxW */
196
+ const cudnnTensorDescriptor_t yDesc,
197
+ void *y, /* NxCxHxW */
198
+
199
+ /* Shared desc for the next 6 tensors in the argument list.
200
+ Data type to be set as follows:
201
+ type = (typeOf(x) == double) ? double : float
202
+ Dimensions for this descriptor depend on normalization mode
203
+ - Spatial Normalization : tensors are expected to have dims 1xCx1x1
204
+ (normalization is performed across NxHxW)
205
+ - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW
206
+ (normalization is performed across N) */
207
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
208
+
209
+ /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */
210
+ const void *bnScale,
211
+ const void *bnBias,
212
+
213
+ /* MUST use factor=1 in the very first call of a complete training cycle.
214
+ Use a factor=1/(1+n) at N-th call to the function to get
215
+ Cumulative Moving Average (CMA) behavior
216
+ CMA[n] = (x[1]+...+x[n])/n
217
+ Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) =
218
+ ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) =
219
+ CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */
220
+ double exponentialAverageFactor,
221
+
222
+ /* Used in Training phase only.
223
+ runningMean = newMean*factor + runningMean*(1-factor) */
224
+ void *resultRunningMean,
225
+ /* Output in training mode, input in inference. Is the moving average
226
+ of variance[x] (factor is applied in the same way as for runningMean) */
227
+ void *resultRunningVariance,
228
+
229
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
230
+ double epsilon,
231
+
232
+ /* Optionally save intermediate results from the forward pass here
233
+ - can be reused to speed up backward pass. NULL if unused */
234
+ void *resultSaveMean,
235
+ void *resultSaveInvVariance);
236
+
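A small sketch of the exponentialAverageFactor schedule described in the comment above: passing factor = 1 on the first call of a training run and 1/(1+n) on the n-th call makes resultRunningMean/resultRunningVariance behave as cumulative moving averages, CMA[n] = (x[1]+...+x[n])/n. The handle, descriptors and device buffers named in the commented call are assumptions of this sketch.

#include <cudnn.h>

/* Factor schedule for CMA behavior: 1, 1/2, 1/3, ... for callIndex = 0, 1, 2, ... */
static double cma_factor(unsigned long long callIndex) {
    return 1.0 / (1.0 + (double)callIndex);
}

/* Usage inside a training loop (error checks omitted):
 *
 *   cudnnBatchNormalizationForwardTraining(handle, CUDNN_BATCHNORM_SPATIAL,
 *       &alpha, &beta,
 *       xDesc, d_x, yDesc, d_y,
 *       bnDesc, d_scale, d_bias,
 *       cma_factor(step), d_runningMean, d_runningVar,
 *       1e-5,                               // epsilon >= CUDNN_BN_MIN_EPSILON
 *       d_saveMean, d_saveInvVar);          // cached for the backward pass
 */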
237
+ /* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */
238
+ cudnnStatus_t CUDNNWINAPI
239
+ cudnnBatchNormalizationForwardTrainingEx(
240
+ cudnnHandle_t handle,
241
+ cudnnBatchNormMode_t mode,
242
+ cudnnBatchNormOps_t bnOps,
243
+
244
+ const void *alpha, /* alpha[0] = result blend factor */
245
+ const void *beta, /* beta[0] = dest layer blend factor */
246
+
247
+ const cudnnTensorDescriptor_t xDesc,
248
+ const void *xData,
249
+ const cudnnTensorDescriptor_t zDesc,
250
+ const void *zData,
251
+ const cudnnTensorDescriptor_t yDesc,
252
+ void *yData,
253
+
254
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
255
+ const void *bnScale,
256
+ const void *bnBias,
257
+
258
+ double exponentialAverageFactor,
259
+ void *resultRunningMean,
260
+ void *resultRunningVariance,
261
+
262
+ /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
263
+ double epsilon,
264
+
265
+ /* Optionally save intermediate results from the forward pass here
266
+ - can be reused to speed up backward pass. NULL if unused */
267
+ void *resultSaveMean,
268
+ void *resultSaveInvVariance,
269
+
270
+ cudnnActivationDescriptor_t activationDesc,
271
+ void *workspace,
272
+ size_t workSpaceSizeInBytes,
273
+ void *reserveSpace,
274
+ size_t reserveSpaceSizeInBytes);
275
+
276
+ /* Performs backward pass of Batch Normalization layer. Returns x gradient,
277
+ * bnScale gradient and bnBias gradient */
278
+ cudnnStatus_t CUDNNWINAPI
279
+ cudnnBatchNormalizationBackward(cudnnHandle_t handle,
280
+ cudnnBatchNormMode_t mode,
281
+ const void *alphaDataDiff,
282
+ const void *betaDataDiff,
283
+ const void *alphaParamDiff,
284
+ const void *betaParamDiff,
285
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */
286
+ const void *x,
287
+ const cudnnTensorDescriptor_t dyDesc,
288
+ const void *dy,
289
+ const cudnnTensorDescriptor_t dxDesc,
290
+ void *dx,
291
+ /* Shared tensor desc for the 4 tensors below */
292
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
293
+ const void *bnScale, /* bnBias doesn't affect backpropagation */
294
+ /* scale and bias diff are not backpropagated below this layer */
295
+ void *dBnScaleResult,
296
+ void *dBnBiasResult,
297
+ /* Same epsilon as forward pass */
298
+ double epsilon,
299
+
300
+ /* Optionally cached intermediate results from
301
+ forward pass */
302
+ const void *savedMean,
303
+ const void *savedInvVariance);
304
+
305
+ cudnnStatus_t CUDNNWINAPI
306
+ cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle,
307
+ cudnnBatchNormMode_t mode,
308
+ cudnnBatchNormOps_t bnOps,
309
+
310
+ const void *alphaDataDiff,
311
+ const void *betaDataDiff,
312
+ const void *alphaParamDiff,
313
+ const void *betaParamDiff,
314
+ const cudnnTensorDescriptor_t xDesc,
315
+ const void *xData,
316
+ const cudnnTensorDescriptor_t yDesc,
317
+ const void *yData,
318
+ const cudnnTensorDescriptor_t dyDesc,
319
+ const void *dyData,
320
+ const cudnnTensorDescriptor_t dzDesc,
321
+ void *dzData,
322
+ const cudnnTensorDescriptor_t dxDesc,
323
+ void *dxData,
324
+
325
+ /* Shared tensor desc for the 4 tensors below */
326
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
327
+ const void *bnScaleData,
328
+ const void *bnBiasData, /* needed if there is activation */
329
+ void *dBnScaleData,
330
+ void *dBnBiasData,
331
+ double epsilon, /* Same epsilon as forward pass */
332
+
333
+ /* Optionally cached intermediate results from
334
+ forward pass */
335
+ const void *savedMean,
336
+ const void *savedInvVariance,
337
+ cudnnActivationDescriptor_t activationDesc,
338
+ void *workSpace,
339
+ size_t workSpaceSizeInBytes,
340
+ void *reserveSpace,
341
+ size_t reserveSpaceSizeInBytes);
342
+
343
+ cudnnStatus_t CUDNNWINAPI
344
+ cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle,
345
+ cudnnNormMode_t mode,
346
+ cudnnNormOps_t normOps,
347
+ cudnnNormAlgo_t algo,
348
+ const cudnnTensorDescriptor_t xDesc,
349
+ const cudnnTensorDescriptor_t zDesc,
350
+ const cudnnTensorDescriptor_t yDesc,
351
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
352
+ const cudnnActivationDescriptor_t activationDesc,
353
+ const cudnnTensorDescriptor_t normMeanVarDesc,
354
+ size_t *sizeInBytes,
355
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
356
+
357
+ cudnnStatus_t CUDNNWINAPI
358
+ cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle,
359
+ cudnnNormMode_t mode,
360
+ cudnnNormOps_t normOps,
361
+ cudnnNormAlgo_t algo,
362
+ const cudnnTensorDescriptor_t xDesc,
363
+ const cudnnTensorDescriptor_t yDesc,
364
+ const cudnnTensorDescriptor_t dyDesc,
365
+ const cudnnTensorDescriptor_t dzDesc,
366
+ const cudnnTensorDescriptor_t dxDesc,
367
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
368
+ const cudnnActivationDescriptor_t activationDesc,
369
+ const cudnnTensorDescriptor_t normMeanVarDesc,
370
+ size_t *sizeInBytes,
371
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
372
+
373
+ cudnnStatus_t CUDNNWINAPI
374
+ cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle,
375
+ cudnnNormMode_t mode,
376
+ cudnnNormOps_t normOps,
377
+ cudnnNormAlgo_t algo,
378
+ const cudnnActivationDescriptor_t activationDesc,
379
+ const cudnnTensorDescriptor_t xDesc,
380
+ size_t *sizeInBytes,
381
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
382
+
383
+ /* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */
384
+ cudnnStatus_t CUDNNWINAPI
385
+ cudnnNormalizationForwardTraining(cudnnHandle_t handle,
386
+ cudnnNormMode_t mode,
387
+ cudnnNormOps_t normOps,
388
+ cudnnNormAlgo_t algo,
389
+ const void *alpha, /* alpha[0] = result blend factor */
390
+ const void *beta, /* beta[0] = dest layer blend factor */
391
+ const cudnnTensorDescriptor_t xDesc,
392
+ const void *xData,
393
+ const cudnnTensorDescriptor_t normScaleBiasDesc,
394
+ const void *normScale,
395
+ const void *normBias,
396
+ double exponentialAverageFactor,
397
+ const cudnnTensorDescriptor_t normMeanVarDesc,
398
+ void *resultRunningMean,
399
+ void *resultRunningVariance,
400
+ /* Has to be >= 0. Should be the same in forward and backward functions. */
401
+ double epsilon,
402
+ /* Optionally save intermediate results from the forward pass here
403
+ - can be reused to speed up backward pass. NULL if unused */
404
+ void *resultSaveMean,
405
+ void *resultSaveInvVariance,
406
+ cudnnActivationDescriptor_t activationDesc,
407
+ const cudnnTensorDescriptor_t zDesc,
408
+ const void *zData,
409
+ const cudnnTensorDescriptor_t yDesc,
410
+ void *yData,
411
+ void *workspace,
412
+ size_t workSpaceSizeInBytes,
413
+ void *reserveSpace,
414
+ size_t reserveSpaceSizeInBytes,
415
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
416
+
417
+ cudnnStatus_t CUDNNWINAPI
418
+ cudnnNormalizationBackward(cudnnHandle_t handle,
419
+ cudnnNormMode_t mode,
420
+ cudnnNormOps_t normOps,
421
+ cudnnNormAlgo_t algo,
422
+ const void *alphaDataDiff,
423
+ const void *betaDataDiff,
424
+ const void *alphaParamDiff,
425
+ const void *betaParamDiff,
426
+ const cudnnTensorDescriptor_t xDesc,
427
+ const void *xData,
428
+ const cudnnTensorDescriptor_t yDesc,
429
+ const void *yData,
430
+ const cudnnTensorDescriptor_t dyDesc,
431
+ const void *dyData,
432
+ const cudnnTensorDescriptor_t dzDesc,
433
+ void *dzData,
434
+ const cudnnTensorDescriptor_t dxDesc,
435
+ void *dxData,
436
+ /* Shared tensor desc for the 4 tensors below */
437
+ const cudnnTensorDescriptor_t dNormScaleBiasDesc,
438
+ const void *normScaleData,
439
+ const void *normBiasData, /* needed if there is activation */
440
+ void *dNormScaleData,
441
+ void *dNormBiasData,
442
+ double epsilon, /* Same epsilon as forward pass */
443
+ const cudnnTensorDescriptor_t normMeanVarDesc,
444
+ /* Optionally cached intermediate results from
445
+ forward pass */
446
+ const void *savedMean,
447
+ const void *savedInvVariance,
448
+ cudnnActivationDescriptor_t activationDesc,
449
+ void *workSpace,
450
+ size_t workSpaceSizeInBytes,
451
+ void *reserveSpace,
452
+ size_t reserveSpaceSizeInBytes,
453
+ int groupCnt); /* Placeholder for future work; should be set to 1 for now */
454
+
455
+ cudnnStatus_t CUDNNWINAPI
456
+ cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle,
457
+ const cudnnSpatialTransformerDescriptor_t stDesc,
458
+ const void *dgrid,
459
+ void *dtheta);
460
+
461
+ cudnnStatus_t CUDNNWINAPI
462
+ cudnnSpatialTfSamplerBackward(cudnnHandle_t handle,
463
+ cudnnSpatialTransformerDescriptor_t stDesc,
464
+ const void *alpha,
465
+ const cudnnTensorDescriptor_t xDesc,
466
+ const void *x,
467
+ const void *beta,
468
+ const cudnnTensorDescriptor_t dxDesc,
469
+ void *dx,
470
+ const void *alphaDgrid,
471
+ const cudnnTensorDescriptor_t dyDesc,
472
+ const void *dy,
473
+ const void *grid,
474
+ const void *betaDgrid,
475
+ void *dgrid);
476
+
477
+ cudnnStatus_t CUDNNWINAPI
478
+ cudnnDropoutBackward(cudnnHandle_t handle,
479
+ const cudnnDropoutDescriptor_t dropoutDesc,
480
+ const cudnnTensorDescriptor_t dydesc,
481
+ const void *dy,
482
+ const cudnnTensorDescriptor_t dxdesc,
483
+ void *dx,
484
+ void *reserveSpace,
485
+ size_t reserveSpaceSizeInBytes);
486
+
487
+ /*
488
+ * \brief Cross-library version checker.
489
+ * This function is implemented differently in each sub-library. Each sublib
490
+ * checks whether its own version matches that of its dependencies.
491
+ * \returns CUDNN_STATUS_SUCCESS if the version check passes,
492
+ * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
493
+ */
494
+ cudnnStatus_t CUDNNWINAPI
495
+ cudnnOpsTrainVersionCheck(void);
496
+
497
+ #if defined(__cplusplus)
498
+ }
499
+ #endif
500
+
501
+ #endif /* CUDNN_OPS_TRAIN_H_ */
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h ADDED
@@ -0,0 +1,501 @@
1
+ /*
2
+ * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*
51
+ * cudnn_ops_train : cuDNN's basic training operations and algorithms.
52
+ */
53
+
54
+ #if !defined(CUDNN_OPS_TRAIN_H_)
55
+ #define CUDNN_OPS_TRAIN_H_
56
+
57
+ #include <cuda_runtime.h>
58
+ #include <stdint.h>
59
+
60
+ #include "cudnn_version.h"
61
+ #include "cudnn_ops_infer.h"
62
+
63
+ /* These version numbers are autogenerated, do not edit manually. */
64
+ #define CUDNN_OPS_TRAIN_MAJOR 8
65
+ #define CUDNN_OPS_TRAIN_MINOR 9
66
+ #define CUDNN_OPS_TRAIN_PATCH 2
67
+
68
+ #if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \
69
+ (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL)
70
+ #error Version mismatch in cuDNN OPS TRAIN!!!
71
+ #endif
72
+
73
+ #if defined(__cplusplus)
74
+ extern "C" {
75
+ #endif
76
+
77
+ /* Function to perform backward softmax */
78
+ cudnnStatus_t CUDNNWINAPI
79
+ cudnnSoftmaxBackward(cudnnHandle_t handle,
80
+ cudnnSoftmaxAlgorithm_t algo,
81
+ cudnnSoftmaxMode_t mode,
82
+ const void *alpha,
83
+ const cudnnTensorDescriptor_t yDesc,
84
+ const void *y,
85
+ const cudnnTensorDescriptor_t dyDesc,
86
+ const void *dy,
87
+ const void *beta,
88
+ const cudnnTensorDescriptor_t dxDesc,
89
+ void *dx);
90
+
91
+ /* Function to perform backward pooling */
92
+ cudnnStatus_t CUDNNWINAPI
93
+ cudnnPoolingBackward(cudnnHandle_t handle,
94
+ const cudnnPoolingDescriptor_t poolingDesc,
95
+ const void *alpha,
96
+ const cudnnTensorDescriptor_t yDesc,
97
+ const void *y,
98
+ const cudnnTensorDescriptor_t dyDesc,
99
+ const void *dy,
100
+ const cudnnTensorDescriptor_t xDesc,
101
+ const void *x,
102
+ const void *beta,
103
+ const cudnnTensorDescriptor_t dxDesc,
104
+ void *dx);
105
+
106
+ /* Function to perform backward activation */
107
+ cudnnStatus_t CUDNNWINAPI
108
+ cudnnActivationBackward(cudnnHandle_t handle,
109
+ cudnnActivationDescriptor_t activationDesc,
110
+ const void *alpha,
111
+ const cudnnTensorDescriptor_t yDesc,
112
+ const void *y,
113
+ const cudnnTensorDescriptor_t dyDesc,
114
+ const void *dy,
115
+ const cudnnTensorDescriptor_t xDesc,
116
+ const void *x,
117
+ const void *beta,
118
+ const cudnnTensorDescriptor_t dxDesc,
119
+ void *dx);
120
+
121
+ /* LRN cross-channel backward computation. Double parameters cast to tensor data type */
122
+ cudnnStatus_t CUDNNWINAPI
123
+ cudnnLRNCrossChannelBackward(cudnnHandle_t handle,
124
+ cudnnLRNDescriptor_t normDesc,
125
+ cudnnLRNMode_t lrnMode,
126
+ const void *alpha,
127
+ const cudnnTensorDescriptor_t yDesc,
128
+ const void *y,
129
+ const cudnnTensorDescriptor_t dyDesc,
130
+ const void *dy,
131
+ const cudnnTensorDescriptor_t xDesc,
132
+ const void *x,
133
+ const void *beta,
134
+ const cudnnTensorDescriptor_t dxDesc,
135
+ void *dx);
136
+
137
+ cudnnStatus_t CUDNNWINAPI
138
+ cudnnDivisiveNormalizationBackward(cudnnHandle_t handle,
139
+ cudnnLRNDescriptor_t normDesc,
140
+ cudnnDivNormMode_t mode,
141
+ const void *alpha,
142
+ const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */
143
+ const void *x,
144
+ const void *means, /* if NULL, means are assumed to be zero */
145
+ const void *dy,
146
+ void *temp,
147
+ void *temp2,
148
+ const void *beta,
149
+ const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */
150
+ void *dx, /* output x differential */
151
+ void *dMeans); /* output means differential, can be NULL */
152
+
153
+ cudnnStatus_t CUDNNWINAPI
154
+ cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle,
155
+ cudnnBatchNormMode_t mode,
156
+ cudnnBatchNormOps_t bnOps,
157
+ const cudnnTensorDescriptor_t xDesc,
158
+ const cudnnTensorDescriptor_t zDesc,
159
+ const cudnnTensorDescriptor_t yDesc,
160
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
161
+ const cudnnActivationDescriptor_t activationDesc,
162
+ size_t *sizeInBytes);
163
+
164
+ cudnnStatus_t CUDNNWINAPI
165
+ cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle,
166
+ cudnnBatchNormMode_t mode,
167
+ cudnnBatchNormOps_t bnOps,
168
+ const cudnnTensorDescriptor_t xDesc,
169
+ const cudnnTensorDescriptor_t yDesc,
170
+ const cudnnTensorDescriptor_t dyDesc,
171
+ const cudnnTensorDescriptor_t dzDesc,
172
+ const cudnnTensorDescriptor_t dxDesc,
173
+ const cudnnTensorDescriptor_t dBnScaleBiasDesc,
174
+ const cudnnActivationDescriptor_t activationDesc,
175
+ size_t *sizeInBytes);
176
+
177
+ cudnnStatus_t CUDNNWINAPI
178
+ cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle,
179
+ cudnnBatchNormMode_t mode,
180
+ cudnnBatchNormOps_t bnOps,
181
+ const cudnnActivationDescriptor_t activationDesc,
182
+ const cudnnTensorDescriptor_t xDesc,
183
+ size_t *sizeInBytes);
184
+
185
+ /* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */
186
+ cudnnStatus_t CUDNNWINAPI
187
+ cudnnBatchNormalizationForwardTraining(
188
+ cudnnHandle_t handle,
189
+ cudnnBatchNormMode_t mode,
190
+
191
+ const void *alpha, /* alpha[0] = result blend factor */
192
+ const void *beta, /* beta[0] = dest layer blend factor */
193
+
194
+ const cudnnTensorDescriptor_t xDesc,
195
+ const void *x, /* NxCxHxW */
196
+ const cudnnTensorDescriptor_t yDesc,
197
+ void *y, /* NxCxHxW */
198
+
199
+ /* Shared desc for the next 6 tensors in the argument list.
200
+ Data type to be set as follows:
201
+ type = (typeOf(x) == double) ? double : float
202
+ Dimensions for this descriptor depend on normalization mode
203
+ - Spatial Normalization : tensors are expected to have dims 1xCx1x1
204
+ (normalization is performed across NxHxW)
205
+ - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW
206
+ (normalization is performed across N) */
207
+ const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
208
+
209
+ /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */
210
+ const void *bnScale,
211
+ const void *bnBias,
212
+
213
+ /* MUST use factor=1 in the very first call of a complete training cycle.
214
+ Use a factor=1/(1+n) at N-th call to the function to get
215
+ Cumulative Moving Average (CMA) behavior
216
+ CMA[n] = (x[1]+...+x[n])/n
217
+ Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) =
218
+ ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) =
219
+ CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */
220
+ double exponentialAverageFactor,
221
+
222
+ /* Used in Training phase only.
223
+ runningMean = newMean*factor + runningMean*(1-factor) */
224
+ void *resultRunningMean,
225
+ /* Output in training mode, input in inference. Is the moving average
226
+ of variance[x] (factor is applied in the same way as for runningMean) */
227
+ void *resultRunningVariance,
228
+
229
+     /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
+     double epsilon,
+
+     /* Optionally save intermediate results from the forward pass here
+        - can be reused to speed up backward pass. NULL if unused */
+     void *resultSaveMean,
+     void *resultSaveInvVariance);
+
+ /* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnBatchNormalizationForwardTrainingEx(
+     cudnnHandle_t handle,
+     cudnnBatchNormMode_t mode,
+     cudnnBatchNormOps_t bnOps,
+
+     const void *alpha, /* alpha[0] = result blend factor */
+     const void *beta,  /* beta[0] = dest layer blend factor */
+
+     const cudnnTensorDescriptor_t xDesc,
+     const void *xData,
+     const cudnnTensorDescriptor_t zDesc,
+     const void *zData,
+     const cudnnTensorDescriptor_t yDesc,
+     void *yData,
+
+     const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
+     const void *bnScale,
+     const void *bnBias,
+
+     double exponentialAverageFactor,
+     void *resultRunningMean,
+     void *resultRunningVariance,
+
+     /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */
+     double epsilon,
+
+     /* Optionally save intermediate results from the forward pass here
+        - can be reused to speed up backward pass. NULL if unused */
+     void *resultSaveMean,
+     void *resultSaveInvVariance,
+
+     cudnnActivationDescriptor_t activationDesc,
+     void *workspace,
+     size_t workSpaceSizeInBytes,
+     void *reserveSpace,
+     size_t reserveSpaceSizeInBytes);
+
+ /* Performs backward pass of Batch Normalization layer. Returns x gradient,
+  * bnScale gradient and bnBias gradient */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnBatchNormalizationBackward(cudnnHandle_t handle,
+     cudnnBatchNormMode_t mode,
+     const void *alphaDataDiff,
+     const void *betaDataDiff,
+     const void *alphaParamDiff,
+     const void *betaParamDiff,
+     const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */
+     const void *x,
+     const cudnnTensorDescriptor_t dyDesc,
+     const void *dy,
+     const cudnnTensorDescriptor_t dxDesc,
+     void *dx,
+     /* Shared tensor desc for the 4 tensors below */
+     const cudnnTensorDescriptor_t dBnScaleBiasDesc,
+     const void *bnScale, /* bnBias doesn't affect backpropagation */
+     /* scale and bias diff are not backpropagated below this layer */
+     void *dBnScaleResult,
+     void *dBnBiasResult,
+     /* Same epsilon as forward pass */
+     double epsilon,
+
+     /* Optionally cached intermediate results from the forward pass */
+     const void *savedMean,
+     const void *savedInvVariance);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle,
+     cudnnBatchNormMode_t mode,
+     cudnnBatchNormOps_t bnOps,
+
+     const void *alphaDataDiff,
+     const void *betaDataDiff,
+     const void *alphaParamDiff,
+     const void *betaParamDiff,
+     const cudnnTensorDescriptor_t xDesc,
+     const void *xData,
+     const cudnnTensorDescriptor_t yDesc,
+     const void *yData,
+     const cudnnTensorDescriptor_t dyDesc,
+     const void *dyData,
+     const cudnnTensorDescriptor_t dzDesc,
+     void *dzData,
+     const cudnnTensorDescriptor_t dxDesc,
+     void *dxData,
+
+     /* Shared tensor desc for the 4 tensors below */
+     const cudnnTensorDescriptor_t dBnScaleBiasDesc,
+     const void *bnScaleData,
+     const void *bnBiasData, /* needed if there is activation */
+     void *dBnScaleData,
+     void *dBnBiasData,
+     double epsilon, /* Same epsilon as forward pass */
+
+     /* Optionally cached intermediate results from the forward pass */
+     const void *savedMean,
+     const void *savedInvVariance,
+     cudnnActivationDescriptor_t activationDesc,
+     void *workSpace,
+     size_t workSpaceSizeInBytes,
+     void *reserveSpace,
+     size_t reserveSpaceSizeInBytes);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle,
+     cudnnNormMode_t mode,
+     cudnnNormOps_t normOps,
+     cudnnNormAlgo_t algo,
+     const cudnnTensorDescriptor_t xDesc,
+     const cudnnTensorDescriptor_t zDesc,
+     const cudnnTensorDescriptor_t yDesc,
+     const cudnnTensorDescriptor_t normScaleBiasDesc,
+     const cudnnActivationDescriptor_t activationDesc,
+     const cudnnTensorDescriptor_t normMeanVarDesc,
+     size_t *sizeInBytes,
+     int groupCnt); /* Placeholder for future work; should be set to 1 for now */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle,
+     cudnnNormMode_t mode,
+     cudnnNormOps_t normOps,
+     cudnnNormAlgo_t algo,
+     const cudnnTensorDescriptor_t xDesc,
+     const cudnnTensorDescriptor_t yDesc,
+     const cudnnTensorDescriptor_t dyDesc,
+     const cudnnTensorDescriptor_t dzDesc,
+     const cudnnTensorDescriptor_t dxDesc,
+     const cudnnTensorDescriptor_t dNormScaleBiasDesc,
+     const cudnnActivationDescriptor_t activationDesc,
+     const cudnnTensorDescriptor_t normMeanVarDesc,
+     size_t *sizeInBytes,
+     int groupCnt); /* Placeholder for future work; should be set to 1 for now */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle,
+     cudnnNormMode_t mode,
+     cudnnNormOps_t normOps,
+     cudnnNormAlgo_t algo,
+     const cudnnActivationDescriptor_t activationDesc,
+     const cudnnTensorDescriptor_t xDesc,
+     size_t *sizeInBytes,
+     int groupCnt); /* Placeholder for future work; should be set to 1 for now */
+
+ /* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnNormalizationForwardTraining(cudnnHandle_t handle,
+     cudnnNormMode_t mode,
+     cudnnNormOps_t normOps,
+     cudnnNormAlgo_t algo,
+     const void *alpha, /* alpha[0] = result blend factor */
+     const void *beta,  /* beta[0] = dest layer blend factor */
+     const cudnnTensorDescriptor_t xDesc,
+     const void *xData,
+     const cudnnTensorDescriptor_t normScaleBiasDesc,
+     const void *normScale,
+     const void *normBias,
+     double exponentialAverageFactor,
+     const cudnnTensorDescriptor_t normMeanVarDesc,
+     void *resultRunningMean,
+     void *resultRunningVariance,
+     /* Has to be >= 0. Should be the same in forward and backward functions. */
+     double epsilon,
+     /* Optionally save intermediate results from the forward pass here
+        - can be reused to speed up backward pass. NULL if unused */
+     void *resultSaveMean,
+     void *resultSaveInvVariance,
+     cudnnActivationDescriptor_t activationDesc,
+     const cudnnTensorDescriptor_t zDesc,
+     const void *zData,
+     const cudnnTensorDescriptor_t yDesc,
+     void *yData,
+     void *workspace,
+     size_t workSpaceSizeInBytes,
+     void *reserveSpace,
+     size_t reserveSpaceSizeInBytes,
+     int groupCnt); /* Placeholder for future work; should be set to 1 for now */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnNormalizationBackward(cudnnHandle_t handle,
+     cudnnNormMode_t mode,
+     cudnnNormOps_t normOps,
+     cudnnNormAlgo_t algo,
+     const void *alphaDataDiff,
+     const void *betaDataDiff,
+     const void *alphaParamDiff,
+     const void *betaParamDiff,
+     const cudnnTensorDescriptor_t xDesc,
+     const void *xData,
+     const cudnnTensorDescriptor_t yDesc,
+     const void *yData,
+     const cudnnTensorDescriptor_t dyDesc,
+     const void *dyData,
+     const cudnnTensorDescriptor_t dzDesc,
+     void *dzData,
+     const cudnnTensorDescriptor_t dxDesc,
+     void *dxData,
+     /* Shared tensor desc for the 4 tensors below */
+     const cudnnTensorDescriptor_t dNormScaleBiasDesc,
+     const void *normScaleData,
+     const void *normBiasData, /* needed if there is activation */
+     void *dNormScaleData,
+     void *dNormBiasData,
+     double epsilon, /* Same epsilon as forward pass */
+     const cudnnTensorDescriptor_t normMeanVarDesc,
+     /* Optionally cached intermediate results from the forward pass */
+     const void *savedMean,
+     const void *savedInvVariance,
+     cudnnActivationDescriptor_t activationDesc,
+     void *workSpace,
+     size_t workSpaceSizeInBytes,
+     void *reserveSpace,
+     size_t reserveSpaceSizeInBytes,
+     int groupCnt); /* Placeholder for future work; should be set to 1 for now */
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle,
+     const cudnnSpatialTransformerDescriptor_t stDesc,
+     const void *dgrid,
+     void *dtheta);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnSpatialTfSamplerBackward(cudnnHandle_t handle,
+     cudnnSpatialTransformerDescriptor_t stDesc,
+     const void *alpha,
+     const cudnnTensorDescriptor_t xDesc,
+     const void *x,
+     const void *beta,
+     const cudnnTensorDescriptor_t dxDesc,
+     void *dx,
+     const void *alphaDgrid,
+     const cudnnTensorDescriptor_t dyDesc,
+     const void *dy,
+     const void *grid,
+     const void *betaDgrid,
+     void *dgrid);
+
+ cudnnStatus_t CUDNNWINAPI
+ cudnnDropoutBackward(cudnnHandle_t handle,
+     const cudnnDropoutDescriptor_t dropoutDesc,
+     const cudnnTensorDescriptor_t dydesc,
+     const void *dy,
+     const cudnnTensorDescriptor_t dxdesc,
+     void *dx,
+     void *reserveSpace,
+     size_t reserveSpaceSizeInBytes);
+
+ /*
+  * \brief Cross-library version checker.
+  * This function is implemented differently in each sub-library. Each sub-library
+  * checks whether its own version matches that of its dependencies.
+  * \returns CUDNN_STATUS_SUCCESS if the version check passes,
+  *          CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent.
+  */
+ cudnnStatus_t CUDNNWINAPI
+ cudnnOpsTrainVersionCheck(void);
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif /* CUDNN_OPS_TRAIN_H_ */
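A minimal usage sketch, not part of the header above: the declarations in cudnn_ops_train.h pair a training-time forward pass, which caches the batch mean and inverse variance, with a backward pass that consumes them using the same epsilon. The single-step C example below is illustrative only; the NCHW shape, buffer contents, the CHECK_CUDNN macro, and the opening arguments of cudnnBatchNormalizationForwardTraining (declared earlier in the header, before this excerpt) are assumptions, not code shipped with this package.

#include <cudnn.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_CUDNN(call)                                                   \
    do {                                                                    \
        cudnnStatus_t s_ = (call);                                          \
        if (s_ != CUDNN_STATUS_SUCCESS) {                                   \
            fprintf(stderr, "cuDNN error: %s\n", cudnnGetErrorString(s_));  \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

int main(void) {
    const int n = 32, c = 64, h = 28, w = 28;  /* illustrative NCHW shape */
    cudnnHandle_t handle;
    cudnnTensorDescriptor_t xDesc, bnDesc;
    CHECK_CUDNN(cudnnCreate(&handle));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&xDesc));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&bnDesc));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, n, c, h, w));
    /* Derive the per-channel (1,c,1,1) descriptor shared by scale/bias/mean/variance. */
    CHECK_CUDNN(cudnnDeriveBNTensorDescriptor(bnDesc, xDesc, CUDNN_BATCHNORM_SPATIAL));

    /* Device buffers; filling x/dy/scale/bias with real data is omitted here. */
    size_t xBytes = (size_t)n * c * h * w * sizeof(float);
    size_t cBytes = (size_t)c * sizeof(float);
    float *x, *y, *dy, *dx, *scale, *bias, *dScale, *dBias;
    float *runMean, *runVar, *saveMean, *saveInvVar;
    cudaMalloc((void **)&x, xBytes);        cudaMalloc((void **)&y, xBytes);
    cudaMalloc((void **)&dy, xBytes);       cudaMalloc((void **)&dx, xBytes);
    cudaMalloc((void **)&scale, cBytes);    cudaMalloc((void **)&bias, cBytes);
    cudaMalloc((void **)&dScale, cBytes);   cudaMalloc((void **)&dBias, cBytes);
    cudaMalloc((void **)&runMean, cBytes);  cudaMalloc((void **)&runVar, cBytes);
    cudaMalloc((void **)&saveMean, cBytes); cudaMalloc((void **)&saveInvVar, cBytes);

    const float one = 1.0f, zero = 0.0f;
    const double eps = CUDNN_BN_MIN_EPSILON;  /* same value in forward and backward */

    /* Forward training pass: writes y, overwrites the running statistics
       (factor 1.0 on the first step), and caches saved mean / inverse variance. */
    CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(
        handle, CUDNN_BATCHNORM_SPATIAL, &one, &zero,
        xDesc, x, xDesc, y,
        bnDesc, scale, bias,
        1.0, runMean, runVar,
        eps, saveMean, saveInvVar));

    /* Backward pass: reuses the cached statistics to produce dx, dScale, dBias. */
    CHECK_CUDNN(cudnnBatchNormalizationBackward(
        handle, CUDNN_BATCHNORM_SPATIAL,
        &one, &zero, &one, &zero,
        xDesc, x, xDesc, dy, xDesc, dx,
        bnDesc, scale, dScale, dBias,
        eps, saveMean, saveInvVar));

    /* Cleanup (cudaFree of the device buffers omitted for brevity). */
    CHECK_CUDNN(cudnnDestroyTensorDescriptor(bnDesc));
    CHECK_CUDNN(cudnnDestroyTensorDescriptor(xDesc));
    CHECK_CUDNN(cudnnDestroy(handle));
    return 0;
}

The Ex and cudnnNormalization* variants declared above follow the same pattern, adding the bnOps/normOps selector plus workspace and reserve-space buffers, whose sizes would come from the Get*WorkspaceSize and Get*ReserveSpaceSize queries in this header.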
llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h ADDED
@@ -0,0 +1,78 @@
+ /*
+  * Copyright 2014-2023 NVIDIA Corporation. All rights reserved.
+  *
+  * NOTICE TO LICENSEE:
+  *
+  * This source code and/or documentation ("Licensed Deliverables") are
+  * subject to NVIDIA intellectual property rights under U.S. and
+  * international Copyright laws.
+  *
+  * These Licensed Deliverables contained herein is PROPRIETARY and
+  * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+  * conditions of a form of NVIDIA software license agreement by and
+  * between NVIDIA and Licensee ("License Agreement") or electronically
+  * accepted by Licensee. Notwithstanding any terms or conditions to
+  * the contrary in the License Agreement, reproduction or disclosure
+  * of the Licensed Deliverables to any third party without the express
+  * written consent of NVIDIA is prohibited.
+  *
+  * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+  * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+  * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+  * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+  * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+  * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+  * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+  * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+  * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+  * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+  * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+  * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+  * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+  * OF THESE LICENSED DELIVERABLES.
+  *
+  * U.S. Government End Users. These Licensed Deliverables are a
+  * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+  * 1995), consisting of "commercial computer software" and "commercial
+  * computer software documentation" as such terms are used in 48
+  * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+  * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+  * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+  * U.S. Government End Users acquire the Licensed Deliverables with
+  * only those rights set forth herein.
+  *
+  * Any use of the Licensed Deliverables in individual and commercial
+  * software must include, in the user documentation and internal
+  * comments to the code, the above Disclaimer and U.S. Government End
+  * Users Notice.
+  */
+
+ /* cudnn : Neural Networks Library */
+
+ #if !defined(CUDNN_H_)
+ #define CUDNN_H_
+
+ #include <cuda_runtime.h>
+ #include <stdint.h>
+
+ #include "cudnn_version.h"
+ #include "cudnn_ops_infer.h"
+ #include "cudnn_ops_train.h"
+ #include "cudnn_adv_infer.h"
+ #include "cudnn_adv_train.h"
+ #include "cudnn_cnn_infer.h"
+ #include "cudnn_cnn_train.h"
+
+ #include "cudnn_backend.h"
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif
+
+ #if defined(__cplusplus)
+ }
+ #endif
+
+ #endif /* CUDNN_H_ */
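A short sketch of how this umbrella header is consumed: a translation unit includes the single cudnn.h (installed here as cudnn_v8.h) and thereby gets every sub-library, including cudnnOpsTrainVersionCheck() from cudnn_ops_train.h above. The program below is illustrative and not part of the package; it compares the compile-time CUDNN_VERSION from cudnn_version.h against the loaded library and runs the cross-library version check described earlier.

#include <cudnn.h>   /* umbrella header: pulls in cudnn_version.h, cudnn_ops_train.h, ... */
#include <stdio.h>

int main(void) {
    /* Compile-time version vs. the library actually loaded at run time. */
    printf("compiled against cuDNN %d, runtime reports %zu\n",
           CUDNN_VERSION, cudnnGetVersion());

    /* Each sub-library checks that its own version matches its dependencies. */
    cudnnStatus_t s = cudnnOpsTrainVersionCheck();
    if (s != CUDNN_STATUS_SUCCESS) {
        fprintf(stderr, "cudnn_ops_train version check failed: %s\n",
                cudnnGetErrorString(s));
        return 1;
    }
    printf("cudnn_ops_train is consistent with its dependencies\n");
    return 0;
}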